--- /dev/null
+From 001e59f3c22f0560e8378bf0c2acca66255f7ff6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 23:11:39 -0700
+Subject: blk-crypto: use dynamic lock class for blk_crypto_profile::lock
+
+From: Eric Biggers <ebiggers@google.com>
+
+[ Upstream commit 2fb48d88e77f29bf9d278f25bcfe82cf59a0e09b ]
+
+When a device-mapper device is passing through the inline encryption
+support of an underlying device, calls to blk_crypto_evict_key() take
+the blk_crypto_profile::lock of the device-mapper device, then take the
+blk_crypto_profile::lock of the underlying device (nested). This isn't
+a real deadlock, but it causes a lockdep report because there is only
+one lock class for all instances of this lock.
+
+Lockdep subclasses don't really work here because the hierarchy of block
+devices is dynamic and could have more than 2 levels.
+
+Instead, register a dynamic lock class for each blk_crypto_profile, and
+associate that with the lock.
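+
+As a rough sketch of that pattern (generic names, not the actual driver
+code), each instance registers its own lockdep key, hands it to the
+rwsem initializer, and unregisters it on teardown:
+
+    struct foo {
+        struct rw_semaphore lock;
+        struct lock_class_key lockdep_key;
+    };
+
+    static void foo_init(struct foo *f)
+    {
+        /* one lock class per instance instead of one shared class */
+        lockdep_register_key(&f->lockdep_key);
+        __init_rwsem(&f->lock, "&foo->lock", &f->lockdep_key);
+    }
+
+    static void foo_destroy(struct foo *f)
+    {
+        /* the key must be unregistered before the memory goes away */
+        lockdep_unregister_key(&f->lockdep_key);
+    }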
+
+This avoids false-positive lockdep reports like the following:
+
+ ============================================
+ WARNING: possible recursive locking detected
+ 6.4.0-rc5 #2 Not tainted
+ --------------------------------------------
+ fscryptctl/1421 is trying to acquire lock:
+ ffffff80829ca418 (&profile->lock){++++}-{3:3}, at: __blk_crypto_evict_key+0x44/0x1c0
+
+ but task is already holding lock:
+ ffffff8086b68ca8 (&profile->lock){++++}-{3:3}, at: __blk_crypto_evict_key+0xc8/0x1c0
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&profile->lock);
+ lock(&profile->lock);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+Fixes: 1b2628397058 ("block: Keyslot Manager for Inline Encryption")
+Reported-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20230610061139.212085-1-ebiggers@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-crypto-profile.c | 12 ++++++++++--
+ include/linux/blk-crypto-profile.h | 1 +
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
+index 3290c03c9918d..aa7fc1436893c 100644
+--- a/block/blk-crypto-profile.c
++++ b/block/blk-crypto-profile.c
+@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int slot_hashtable_size;
+
+ memset(profile, 0, sizeof(*profile));
+- init_rwsem(&profile->lock);
++
++ /*
++ * profile->lock of an underlying device can nest inside profile->lock
++ * of a device-mapper device, so use a dynamic lock class to avoid
++ * false-positive lockdep reports.
++ */
++ lockdep_register_key(&profile->lockdep_key);
++ __init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);
+
+ if (num_slots == 0)
+ return 0;
+@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
+ GFP_KERNEL);
+ if (!profile->slots)
+- return -ENOMEM;
++ goto err_destroy;
+
+ profile->num_slots = num_slots;
+
+@@ -441,6 +448,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
+ {
+ if (!profile)
+ return;
++ lockdep_unregister_key(&profile->lockdep_key);
+ kvfree(profile->slot_hashtable);
+ kvfree_sensitive(profile->slots,
+ sizeof(profile->slots[0]) * profile->num_slots);
+diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
+index e6802b69cdd64..90ab33cb5d0ef 100644
+--- a/include/linux/blk-crypto-profile.h
++++ b/include/linux/blk-crypto-profile.h
+@@ -111,6 +111,7 @@ struct blk_crypto_profile {
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
++ struct lock_class_key lockdep_key;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+--
+2.39.2
+
--- /dev/null
+From 7dd7b5690a5af8d39e4179235e3f8c29b2c8d39c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 19:58:48 +0800
+Subject: bpf: cpumap: Fix memory leak in cpu_map_update_elem
+
+From: Pu Lehui <pulehui@huawei.com>
+
+[ Upstream commit 4369016497319a9635702da010d02af1ebb1849d ]
+
+Syzkaller reported a memory leak as follows:
+
+BUG: memory leak
+unreferenced object 0xff110001198ef748 (size 192):
+ comm "syz-executor.3", pid 17672, jiffies 4298118891 (age 9.906s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 4a 19 00 00 80 ad e3 e4 fe ff c0 00 ....J...........
+ 00 b2 d3 0c 01 00 11 ff 28 f5 8e 19 01 00 11 ff ........(.......
+ backtrace:
+ [<ffffffffadd28087>] __cpu_map_entry_alloc+0xf7/0xb00
+ [<ffffffffadd28d8e>] cpu_map_update_elem+0x2fe/0x3d0
+ [<ffffffffadc6d0fd>] bpf_map_update_value.isra.0+0x2bd/0x520
+ [<ffffffffadc7349b>] map_update_elem+0x4cb/0x720
+ [<ffffffffadc7d983>] __se_sys_bpf+0x8c3/0xb90
+ [<ffffffffb029cc80>] do_syscall_64+0x30/0x40
+ [<ffffffffb0400099>] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+BUG: memory leak
+unreferenced object 0xff110001198ef528 (size 192):
+ comm "syz-executor.3", pid 17672, jiffies 4298118891 (age 9.906s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffffadd281f0>] __cpu_map_entry_alloc+0x260/0xb00
+ [<ffffffffadd28d8e>] cpu_map_update_elem+0x2fe/0x3d0
+ [<ffffffffadc6d0fd>] bpf_map_update_value.isra.0+0x2bd/0x520
+ [<ffffffffadc7349b>] map_update_elem+0x4cb/0x720
+ [<ffffffffadc7d983>] __se_sys_bpf+0x8c3/0xb90
+ [<ffffffffb029cc80>] do_syscall_64+0x30/0x40
+ [<ffffffffb0400099>] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+BUG: memory leak
+unreferenced object 0xff1100010fd93d68 (size 8):
+ comm "syz-executor.3", pid 17672, jiffies 4298118891 (age 9.906s)
+ hex dump (first 8 bytes):
+ 00 00 00 00 00 00 00 00 ........
+ backtrace:
+ [<ffffffffade5db3e>] kvmalloc_node+0x11e/0x170
+ [<ffffffffadd28280>] __cpu_map_entry_alloc+0x2f0/0xb00
+ [<ffffffffadd28d8e>] cpu_map_update_elem+0x2fe/0x3d0
+ [<ffffffffadc6d0fd>] bpf_map_update_value.isra.0+0x2bd/0x520
+ [<ffffffffadc7349b>] map_update_elem+0x4cb/0x720
+ [<ffffffffadc7d983>] __se_sys_bpf+0x8c3/0xb90
+ [<ffffffffb029cc80>] do_syscall_64+0x30/0x40
+ [<ffffffffb0400099>] entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+In the cpu_map_update_elem flow, if kthread_stop is called before the
+threadfn of rcpu->kthread has run, the KTHREAD_SHOULD_STOP bit set by
+kthread_stop prevents the threadfn from ever executing. As a result,
+rcpu->refcnt never drops to 0, and the allocated rcpu, rcpu->queue and
+rcpu->queue->queue can never be released.
+
+Calling kthread_stop before the kthread's threadfn has executed makes
+kthread_stop return -EINTR, so we can detect this state and complete
+the release of the memory resources there.
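+
+A minimal sketch of acting on that return value (this mirrors the change
+below, it is not additional behaviour):
+
+    err = kthread_stop(rcpu->kthread);
+    if (err)
+        /* threadfn never ran, so drop the reference ourselves */
+        put_cpu_map_entry(rcpu);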
+
+Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP")
+Signed-off-by: Pu Lehui <pulehui@huawei.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Acked-by: Hou Tao <houtao1@huawei.com>
+Link: https://lore.kernel.org/r/20230711115848.2701559-1-pulehui@huaweicloud.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/cpumap.c | 40 ++++++++++++++++++++++++----------------
+ 1 file changed, 24 insertions(+), 16 deletions(-)
+
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index b5ba34ddd4b64..09141351d5457 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -127,22 +127,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ atomic_inc(&rcpu->refcnt);
+ }
+
+-/* called from workqueue, to workaround syscall using preempt_disable */
+-static void cpu_map_kthread_stop(struct work_struct *work)
+-{
+- struct bpf_cpu_map_entry *rcpu;
+-
+- rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+-
+- /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+- * as it waits until all in-flight call_rcu() callbacks complete.
+- */
+- rcu_barrier();
+-
+- /* kthread_stop will wake_up_process and wait for it to complete */
+- kthread_stop(rcpu->kthread);
+-}
+-
+ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
+ {
+ /* The tear-down procedure should have made sure that queue is
+@@ -170,6 +154,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ }
+ }
+
++/* called from workqueue, to workaround syscall using preempt_disable */
++static void cpu_map_kthread_stop(struct work_struct *work)
++{
++ struct bpf_cpu_map_entry *rcpu;
++ int err;
++
++ rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
++
++ /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
++ * as it waits until all in-flight call_rcu() callbacks complete.
++ */
++ rcu_barrier();
++
++ /* kthread_stop will wake_up_process and wait for it to complete */
++ err = kthread_stop(rcpu->kthread);
++ if (err) {
++ /* kthread_stop may be called before cpu_map_kthread_run
++ * is executed, so we need to release the memory related
++ * to rcpu.
++ */
++ put_cpu_map_entry(rcpu);
++ }
++}
++
+ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
+ struct list_head *listp,
+ struct xdp_cpumap_stats *stats)
+--
+2.39.2
+
--- /dev/null
+From 629021e337dc94d5ec14f7bf5446f42484a771b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 20:17:29 +0530
+Subject: bpf: Fix max stack depth check for async callbacks
+
+From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+
+[ Upstream commit 5415ccd50a8620c8cbaa32d6f18c946c453566f5 ]
+
+The check_max_stack_depth pass happens after the verifier's symbolic
+execution, and attempts to walk the call graph of the BPF program,
+ensuring that the stack usage stays within bounds for all possible call
+chains. There are two cases to consider: bpf_pseudo_func and
+bpf_pseudo_call. In the former case, the callback pointer is loaded into
+a register, and it is assumed that it is passed to some helper later which
+calls it (however, there is no way to be sure), but the check remains
+conservative and accounts for the stack usage anyway. For this particular
+case, asynchronous callbacks are skipped as they execute asynchronously
+when their corresponding event fires.
+
+The case of bpf_pseudo_call is simpler and we know that the call is
+definitely made, hence the stack depth of the subprog is accounted for.
+
+However, the current check still skips an asynchronous callback even if
+a bpf_pseudo_call was made for it. This is erroneous, as it will miss
+accounting for the stack usage of the asynchronous callback, which can
+be used to breach the maximum stack depth limit.
+
+Fix this by only skipping asynchronous callbacks when the instruction is
+not a pseudo call to the subprog.
+
+Fixes: 7ddc80a476c2 ("bpf: Teach stack depth check about async callbacks.")
+Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Link: https://lore.kernel.org/r/20230705144730.235802-2-memxor@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 49c6b5e0855cd..8c3ededef3172 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4357,8 +4357,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
+ verbose(env, "verifier bug. subprog has tail_call and async cb\n");
+ return -EFAULT;
+ }
+- /* async callbacks don't increase bpf prog stack size */
+- continue;
++ /* async callbacks don't increase bpf prog stack size unless called directly */
++ if (!bpf_pseudo_call(insn + i))
++ continue;
+ }
+ i = next_insn;
+
+--
+2.39.2
+
--- /dev/null
+From f0b080c9bf0bbc1647af4ded16c4222153ea047c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jun 2023 06:58:13 -0700
+Subject: drm/bridge: ti-sn65dsi86: Fix auxiliary bus lifetime
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit 7aa83fbd712a6f08ffa67890061f26d140c2a84f ]
+
+Memory for the "struct device" for any given device isn't supposed to
+be released until the device's release() is called. This is important
+because someone might be holding a kobject reference to the "struct
+device" and might try to access one of its members even after any
+other cleanup/uninitialization has happened.
+
+Code analysis of ti-sn65dsi86 shows that this isn't quite right. When
+the code was written, it was believed that we could rely on the fact
+that the child devices would all be freed before the parent devices
+and thus we didn't need to worry about a release() function. While I
+still believe that the parent's "struct device" is guaranteed to
+outlive the child's "struct device" (because the child holds a kobject
+reference to the parent), the parent's "devm" allocated memory is a
+different story. That appears to be freed much earlier.
+
+Let's make this better for ti-sn65dsi86 by allocating each auxiliary
+device with kzalloc() and then freeing that memory in the release().
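+
+A rough sketch of that pattern (generic names; it matches the change
+below):
+
+    static void foo_aux_release(struct device *dev)
+    {
+        struct auxiliary_device *aux =
+            container_of(dev, struct auxiliary_device, dev);
+
+        kfree(aux);    /* only runs once the last reference is dropped */
+    }
+
+    /* allocation side: one kzalloc'ed device, freed by the release() */
+    aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+    if (!aux)
+        return -ENOMEM;
+    aux->dev.release = foo_aux_release;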
+
+Fixes: bf73537f411b ("drm/bridge: ti-sn65dsi86: Break GPIO and MIPI-to-eDP bridge into sub-drivers")
+Suggested-by: Stephen Boyd <swboyd@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230613065812.v2.1.I24b838a5b4151fb32bccd6f36397998ea2df9fbb@changeid
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi86.c | 35 +++++++++++++++++----------
+ 1 file changed, 22 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index d16775c973c4e..b89f7f7ca1885 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -170,10 +170,10 @@
+ * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
+ */
+ struct ti_sn65dsi86 {
+- struct auxiliary_device bridge_aux;
+- struct auxiliary_device gpio_aux;
+- struct auxiliary_device aux_aux;
+- struct auxiliary_device pwm_aux;
++ struct auxiliary_device *bridge_aux;
++ struct auxiliary_device *gpio_aux;
++ struct auxiliary_device *aux_aux;
++ struct auxiliary_device *pwm_aux;
+
+ struct device *dev;
+ struct regmap *regmap;
+@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
+ auxiliary_device_delete(data);
+ }
+
+-/*
+- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
+- * sense for the model used here where all of the aux devices are allocated
+- * in the single shared structure. We'll use this noop as a workaround.
+- */
+-static void ti_sn65dsi86_noop(struct device *dev) {}
++static void ti_sn65dsi86_aux_device_release(struct device *dev)
++{
++ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
++
++ kfree(aux);
++}
+
+ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+- struct auxiliary_device *aux,
++ struct auxiliary_device **aux_out,
+ const char *name)
+ {
+ struct device *dev = pdata->dev;
++ struct auxiliary_device *aux;
+ int ret;
+
++ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
++ if (!aux)
++ return -ENOMEM;
++
+ aux->name = name;
+ aux->dev.parent = dev;
+- aux->dev.release = ti_sn65dsi86_noop;
++ aux->dev.release = ti_sn65dsi86_aux_device_release;
+ device_set_of_node_from_dev(&aux->dev, dev);
+ ret = auxiliary_device_init(aux);
+- if (ret)
++ if (ret) {
++ kfree(aux);
+ return ret;
++ }
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
+ if (ret)
+ return ret;
+@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
++ if (!ret)
++ *aux_out = aux;
+
+ return ret;
+ }
+--
+2.39.2
+
--- /dev/null
+From 8904663c93f143e08f994361105e4c026edf709c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jun 2023 17:10:17 +0300
+Subject: drm/i915: Don't preserve dpll_hw_state for slave crtc in Bigjoiner
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+
+[ Upstream commit 5c413188c68da0e4bffc93de1c80257e20741e69 ]
+
+If we are using Bigjoiner, dpll_hw_state is supposed to be exactly the
+same as for the master crtc, so there is no need to save its state for
+the slave crtc.
+
+Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Fixes: 0ff0e219d9b8 ("drm/i915: Compute clocks earlier")
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230628141017.18937-1-stanislav.lisovskiy@intel.com
+(cherry picked from commit cbaf758809952c95ec00e796695049babb08bb60)
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_display.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 455d9ae6c41c9..da9b995b54c8f 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5133,7 +5133,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
+ saved_state->uapi = slave_crtc_state->uapi;
+ saved_state->scaler_state = slave_crtc_state->scaler_state;
+ saved_state->shared_dpll = slave_crtc_state->shared_dpll;
+- saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
+ saved_state->crc_enabled = slave_crtc_state->crc_enabled;
+
+ intel_crtc_free_hw_state(slave_crtc_state);
+--
+2.39.2
+
--- /dev/null
+From af77ab19f6ed0aaca5fe1aedcbf30b39aa643961 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jul 2023 13:55:03 +0100
+Subject: drm/i915: Fix one wrong caching mode enum usage
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+[ Upstream commit 113899c2669dff148b2a5bea4780123811aecc13 ]
+
+Commit a4d86249c773 ("drm/i915/gt: Provide a utility to create a scratch
+buffer") mistakenly passed in uapi I915_CACHING_CACHED as argument to
+i915_gem_object_set_cache_coherency(), which actually takes internal
+enum i915_cache_level.
+
+No functional issue since the value matches I915_CACHE_LLC (1 == 1), which
+is the intended caching mode, but let's clean it up nevertheless.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Fixes: a4d86249c773 ("drm/i915/gt: Provide a utility to create a scratch buffer")
+Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Reviewed-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230707125503.3965817-1-tvrtko.ursulin@linux.intel.com
+(cherry picked from commit 49c60b2f0867ac36fd54d513882a48431aeccae7)
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_gtt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
+index 2eaeba14319e9..f4879f437bfa3 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
+@@ -611,7 +611,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
++ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+--
+2.39.2
+
--- /dev/null
+From fbc486a8ab2fc5a60890acd817ecaf9dc6abb97e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 08:22:02 -0300
+Subject: drm/panel: simple: Add connector_type for innolux_at043tn24
+
+From: Fabio Estevam <festevam@denx.de>
+
+[ Upstream commit 2c56a751845ddfd3078ebe79981aaaa182629163 ]
+
+The innolux at043tn24 display is a parallel LCD. Pass the 'connector_type'
+information to avoid the following warning:
+
+panel-simple panel: Specify missing connector_type
+
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Fixes: 41bcceb4de9c ("drm/panel: simple: Add support for Innolux AT043TN24")
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230620112202.654981-1-festevam@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 7ca00b0323362..e49d352339ff8 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2117,6 +2117,7 @@ static const struct panel_desc innolux_at043tn24 = {
+ .height = 54,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++ .connector_type = DRM_MODE_CONNECTOR_DPI,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ };
+
+--
+2.39.2
+
--- /dev/null
+From 202fd1cdf183a7a28f2c6f0238e9f598b00352a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 22:16:02 +0200
+Subject: drm/panel: simple: Add Powertip PH800480T013 drm_display_mode flags
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 1c519980aced3da1fae37c1339cf43b24eccdee7 ]
+
+Add the missing drm_display_mode DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC
+flags. Those are used by various bridges in the pipeline to correctly
+configure their sync signal polarity.
+
+Fixes: d69de69f2be1 ("drm/panel: simple: Add Powertip PH800480T013 panel")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230615201602.565948-1-marex@denx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-simple.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index e49d352339ff8..f851aaf2c5917 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -3110,6 +3110,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
+ .vsync_start = 480 + 49,
+ .vsync_end = 480 + 49 + 2,
+ .vtotal = 480 + 49 + 2 + 22,
++ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ };
+
+ static const struct panel_desc powertip_ph800480t013_idf02 = {
+--
+2.39.2
+
--- /dev/null
+From 4321b4202567033173fa81cb819ed58544ae9b38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 17:34:10 +0800
+Subject: erofs: avoid infinite loop in z_erofs_do_read_page() when reading
+ beyond EOF
+
+From: Chunhai Guo <guochunhai@vivo.com>
+
+[ Upstream commit 8191213a5835b0317c5e4d0d337ae1ae00c75253 ]
+
+z_erofs_do_read_page() may loop infinitely due to the inappropriate
+truncation in the statement below: the offset is 64 bits, but min_t()
+truncates the result to 32 bits. The solution is to replace unsigned int
+with a 64-bit type, such as erofs_off_t (a standalone demonstration
+follows the example below).
+ cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+
+ - For example:
+ - offset = 0x400160000
+ - end = 0x370
+ - map->m_la = 0x160370
+ - offset + end - map->m_la = 0x400000000
+ - offset + end - map->m_la = 0x00000000 (truncated as unsigned int)
+ - Expected result:
+ - cur = 0
+ - Actual result:
+ - cur = 0x370
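+
+ - A standalone userspace demonstration of the truncation, using the
+   values from the example above (not kernel code):
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        uint64_t offset = 0x400160000ULL, m_la = 0x160370ULL;
+        uint64_t end = 0x370;
+        uint64_t span = offset + end - m_la;    /* 0x400000000 */
+
+        /* min_t(unsigned int, ...): both arguments are cast to u32 */
+        unsigned int bad = end - ((unsigned int)span < (unsigned int)end ?
+                                  (unsigned int)span : (unsigned int)end);
+        /* 64-bit min, as with min_t(erofs_off_t, ...) */
+        uint64_t good = end - (span < end ? span : end);
+
+        printf("32-bit min: cur = %#x\n", bad);             /* 0x370 (wrong) */
+        printf("64-bit min: cur = %#llx\n",
+               (unsigned long long)good);                   /* 0 (expected) */
+        return 0;
+    }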
+
+Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
+Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support")
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20230710093410.44071-1-guochunhai@vivo.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zdata.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index bf6a369f9c696..533e612b6a486 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -866,7 +866,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
+ */
+ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+
+- cur = end - min_t(unsigned int, offset + end - map->m_la, end);
++ cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ zero_user_segment(page, cur, end);
+ goto next_part;
+--
+2.39.2
+
--- /dev/null
+From 7c3875a4dd9a05d9ab6ada7ab1fa2ea5e83926bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 12:25:31 +0800
+Subject: erofs: avoid useless loops in z_erofs_pcluster_readmore() when
+ reading beyond EOF
+
+From: Chunhai Guo <guochunhai@vivo.com>
+
+[ Upstream commit 936aa701d82d397c2d1afcd18ce2c739471d978d ]
+
+z_erofs_pcluster_readmore() may take a long time to loop when the page
+offset is large enough, which is unnecessary and should be prevented.
+
+For example, when the following case is encountered, it will loop 4691368
+times, taking about 27 seconds:
+ - offset = 19217289215
+ - inode_size = 1442672
+
+Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
+Fixes: 386292919c25 ("erofs: introduce readmore decompression strategy")
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Yue Hu <huyue2@coolpad.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20230710042531.28761-1-guochunhai@vivo.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zdata.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 92b2e4ddb7ce9..bf6a369f9c696 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1660,7 +1660,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+ }
+
+ cur = map->m_la + map->m_llen - 1;
+- while (cur >= end) {
++ while ((cur >= end) && (cur < i_size_read(inode))) {
+ pgoff_t index = cur >> PAGE_SHIFT;
+ struct page *page;
+
+--
+2.39.2
+
--- /dev/null
+From 105663b0f62d1b5cf58993ea946c5aab4f8b49c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 14:21:30 +0800
+Subject: erofs: fix fsdax unavailability for chunk-based regular files
+
+From: Xin Yin <yinxin.x@bytedance.com>
+
+[ Upstream commit 18bddc5b67038722cb88fcf51fbf41a0277092cb ]
+
+DAX can be used to share page cache between VMs, reducing guest memory
+overhead. The chunk-based data format is widely used for VM and
+container images, so enable DAX support for it to make erofs more
+useful in VM scenarios.
+
+Fixes: c5aa903a59db ("erofs: support reading chunk-based uncompressed files")
+Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20230711062130.7860-1-yinxin.x@bytedance.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 5aadc73d57652..e090bcd46db14 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -186,7 +186,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
+
+ inode->i_flags &= ~S_DAX;
+ if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
+- vi->datalayout == EROFS_INODE_FLAT_PLAIN)
++ (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
++ vi->datalayout == EROFS_INODE_CHUNK_BASED))
+ inode->i_flags |= S_DAX;
+ if (!nblks)
+ /* measure inode.i_blocks as generic filesystems */
+--
+2.39.2
+
--- /dev/null
+From 44c1e1654c5421636043f5ca657491f8f9658bf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 12:41:28 +0800
+Subject: gve: Set default duplex configuration to full
+
+From: Junfeng Guo <junfeng.guo@intel.com>
+
+[ Upstream commit 0503efeadbf6bb8bf24397613a73b67e665eac5f ]
+
+The duplex mode was left unset in the driver, so the reported value
+defaulted to 0, which corresponds to half duplex. This might mislead
+users into having incorrect expectations about the driver's
+transmission capabilities.
+Set the default duplex configuration to full, as the driver runs in
+full duplex mode at this point.
+
+Fixes: 7e074d5a76ca ("gve: Enable Link Speed Reporting in the driver.")
+Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Message-ID: <20230706044128.2726747-1-junfeng.guo@intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_ethtool.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index 38df602f2869c..033f17cb96be0 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -541,6 +541,9 @@ static int gve_get_link_ksettings(struct net_device *netdev,
+ err = gve_adminq_report_link_speed(priv);
+
+ cmd->base.speed = priv->link_speed;
++
++ cmd->base.duplex = DUPLEX_FULL;
++
+ return err;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 0197e963bf602a4769b8fd245d427b957dd2a121 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 17:40:23 -0700
+Subject: ice: Fix max_rate check while configuring TX rate limits
+
+From: Sridhar Samudrala <sridhar.samudrala@intel.com>
+
+[ Upstream commit 5f16da6ee6ac32e6c8098bc4cfcc4f170694f9da ]
+
+Remove the incorrect check in ice_validate_mqprio_qopt() that rejects
+the filter configuration when the sum of the max_rates of all TCs
+exceeds the link speed. The max rate of each TC is unrelated to the
+values used by other TCs and is valid as long as it is less than the
+link speed.
+
+Fixes: fbc7b27af0f9 ("ice: enable ndo_setup_tc support for mqprio_qdisc")
+Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
+Signed-off-by: Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>
+Tested-by: Bharathi Sreenivas <bharathi.sreenivas@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7a5ec3ce3407a..8f77088900e94 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7852,10 +7852,10 @@ static int
+ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+ {
+- u64 sum_max_rate = 0, sum_min_rate = 0;
+ int non_power_of_2_qcount = 0;
+ struct ice_pf *pf = vsi->back;
+ int max_rss_q_cnt = 0;
++ u64 sum_min_rate = 0;
+ struct device *dev;
+ int i, speed;
+ u8 num_tc;
+@@ -7871,6 +7871,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ dev = ice_pf_to_dev(pf);
+ vsi->ch_rss_size = 0;
+ num_tc = mqprio_qopt->qopt.num_tc;
++ speed = ice_get_link_speed_kbps(vsi);
+
+ for (i = 0; num_tc; i++) {
+ int qcount = mqprio_qopt->qopt.count[i];
+@@ -7911,7 +7912,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ */
+ max_rate = mqprio_qopt->max_rate[i];
+ max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+- sum_max_rate += max_rate;
+
+ /* min_rate is minimum guaranteed rate and it can't be zero */
+ min_rate = mqprio_qopt->min_rate[i];
+@@ -7924,6 +7924,12 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ return -EINVAL;
+ }
+
++ if (max_rate && max_rate > speed) {
++ dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
++ i, max_rate, speed);
++ return -EINVAL;
++ }
++
+ iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+@@ -7961,12 +7967,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+
+- speed = ice_get_link_speed_kbps(vsi);
+- if (sum_max_rate && sum_max_rate > (u64)speed) {
+- dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+- sum_max_rate, speed);
+- return -EINVAL;
+- }
+ if (sum_min_rate && sum_min_rate > (u64)speed) {
+ dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ sum_min_rate, speed);
+--
+2.39.2
+
--- /dev/null
+From bc87d431a818780513bdb58a18be88e653cb3a49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jul 2023 18:43:27 -0700
+Subject: icmp6: Fix null-ptr-deref of ip6_null_entry->rt6i_idev in
+ icmp6_dev().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 2aaa8a15de73874847d62eb595c6683bface80fd ]
+
+With some IPv6 Ext Hdrs (RPL, SRv6, etc.), we can send a packet that
+has the link-local address as both src and dst IP and that will be
+forwarded to an external IP carried in the IPv6 Ext Hdr.
+
+For example, the script below generates a packet whose src IP is the
+link-local address and dst is updated to 11::.
+
+ # for f in $(find /proc/sys/net/ -name *seg6_enabled*); do echo 1 > $f; done
+ # python3
+ >>> from socket import *
+ >>> from scapy.all import *
+ >>>
+ >>> SRC_ADDR = DST_ADDR = "fe80::5054:ff:fe12:3456"
+ >>>
+ >>> pkt = IPv6(src=SRC_ADDR, dst=DST_ADDR)
+ >>> pkt /= IPv6ExtHdrSegmentRouting(type=4, addresses=["11::", "22::"], segleft=1)
+ >>>
+ >>> sk = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW)
+ >>> sk.sendto(bytes(pkt), (DST_ADDR, 0))
+
+For such a packet, we call ip6_route_input() to look up a route for the
+next destination in these three functions depending on the header type.
+
+ * ipv6_rthdr_rcv()
+ * ipv6_rpl_srh_rcv()
+ * ipv6_srh_rcv()
+
+If no route is found, ip6_null_entry is set to skb, and the following
+dst_input(skb) calls ip6_pkt_drop().
+
+Finally, in icmp6_dev(), we dereference skb_rt6_info(skb)->rt6i_idev->dev
+as the input device is the loopback interface. Then, we have to check if
+skb_rt6_info(skb)->rt6i_idev is NULL or not to avoid NULL pointer deref
+for ip6_null_entry.
+
+BUG: kernel NULL pointer dereference, address: 0000000000000000
+ PF: supervisor read access in kernel mode
+ PF: error_code(0x0000) - not-present page
+PGD 0 P4D 0
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 157 Comm: python3 Not tainted 6.4.0-11996-gb121d614371c #35
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+RIP: 0010:icmp6_send (net/ipv6/icmp.c:436 net/ipv6/icmp.c:503)
+Code: fe ff ff 48 c7 40 30 c0 86 5d 83 e8 c6 44 1c 00 e9 c8 fc ff ff 49 8b 46 58 48 83 e0 fe 0f 84 4a fb ff ff 48 8b 80 d0 00 00 00 <48> 8b 00 44 8b 88 e0 00 00 00 e9 34 fb ff ff 4d 85 ed 0f 85 69 01
+RSP: 0018:ffffc90000003c70 EFLAGS: 00000286
+RAX: 0000000000000000 RBX: 0000000000000001 RCX: 00000000000000e0
+RDX: 0000000000000021 RSI: 0000000000000000 RDI: ffff888006d72a18
+RBP: ffffc90000003d80 R08: 0000000000000000 R09: 0000000000000001
+R10: ffffc90000003d98 R11: 0000000000000040 R12: ffff888006d72a10
+R13: 0000000000000000 R14: ffff8880057fb800 R15: ffffffff835d86c0
+FS: 00007f9dc72ee740(0000) GS:ffff88807dc00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000000 CR3: 00000000057b2000 CR4: 00000000007506f0
+PKRU: 55555554
+Call Trace:
+ <IRQ>
+ ip6_pkt_drop (net/ipv6/route.c:4513)
+ ipv6_rthdr_rcv (net/ipv6/exthdrs.c:640 net/ipv6/exthdrs.c:686)
+ ip6_protocol_deliver_rcu (net/ipv6/ip6_input.c:437 (discriminator 5))
+ ip6_input_finish (./include/linux/rcupdate.h:781 net/ipv6/ip6_input.c:483)
+ __netif_receive_skb_one_core (net/core/dev.c:5455)
+ process_backlog (./include/linux/rcupdate.h:781 net/core/dev.c:5895)
+ __napi_poll (net/core/dev.c:6460)
+ net_rx_action (net/core/dev.c:6529 net/core/dev.c:6660)
+ __do_softirq (./arch/x86/include/asm/jump_label.h:27 ./include/linux/jump_label.h:207 ./include/trace/events/irq.h:142 kernel/softirq.c:554)
+ do_softirq (kernel/softirq.c:454 kernel/softirq.c:441)
+ </IRQ>
+ <TASK>
+ __local_bh_enable_ip (kernel/softirq.c:381)
+ __dev_queue_xmit (net/core/dev.c:4231)
+ ip6_finish_output2 (./include/net/neighbour.h:544 net/ipv6/ip6_output.c:135)
+ rawv6_sendmsg (./include/net/dst.h:458 ./include/linux/netfilter.h:303 net/ipv6/raw.c:656 net/ipv6/raw.c:914)
+ sock_sendmsg (net/socket.c:725 net/socket.c:748)
+ __sys_sendto (net/socket.c:2134)
+ __x64_sys_sendto (net/socket.c:2146 net/socket.c:2142 net/socket.c:2142)
+ do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
+ entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
+RIP: 0033:0x7f9dc751baea
+Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89
+RSP: 002b:00007ffe98712c38 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 00007ffe98712cf8 RCX: 00007f9dc751baea
+RDX: 0000000000000060 RSI: 00007f9dc6460b90 RDI: 0000000000000003
+RBP: 00007f9dc56e8be0 R08: 00007ffe98712d70 R09: 000000000000001c
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: ffffffffc4653600 R14: 0000000000000001 R15: 00007f9dc6af5d1b
+ </TASK>
+Modules linked in:
+CR2: 0000000000000000
+ ---[ end trace 0000000000000000 ]---
+RIP: 0010:icmp6_send (net/ipv6/icmp.c:436 net/ipv6/icmp.c:503)
+Code: fe ff ff 48 c7 40 30 c0 86 5d 83 e8 c6 44 1c 00 e9 c8 fc ff ff 49 8b 46 58 48 83 e0 fe 0f 84 4a fb ff ff 48 8b 80 d0 00 00 00 <48> 8b 00 44 8b 88 e0 00 00 00 e9 34 fb ff ff 4d 85 ed 0f 85 69 01
+RSP: 0018:ffffc90000003c70 EFLAGS: 00000286
+RAX: 0000000000000000 RBX: 0000000000000001 RCX: 00000000000000e0
+RDX: 0000000000000021 RSI: 0000000000000000 RDI: ffff888006d72a18
+RBP: ffffc90000003d80 R08: 0000000000000000 R09: 0000000000000001
+R10: ffffc90000003d98 R11: 0000000000000040 R12: ffff888006d72a10
+R13: 0000000000000000 R14: ffff8880057fb800 R15: ffffffff835d86c0
+FS: 00007f9dc72ee740(0000) GS:ffff88807dc00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000000000 CR3: 00000000057b2000 CR4: 00000000007506f0
+PKRU: 55555554
+Kernel panic - not syncing: Fatal exception in interrupt
+Kernel Offset: disabled
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reported-by: Wang Yufen <wangyufen@huawei.com>
+Closes: https://lore.kernel.org/netdev/c41403a9-c2f6-3b7e-0c96-e1901e605cd0@huawei.com/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/icmp.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 9d92d51c47577..e2af7ab992821 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -422,7 +422,10 @@ static struct net_device *icmp6_dev(const struct sk_buff *skb)
+ if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
+ const struct rt6_info *rt6 = skb_rt6_info(skb);
+
+- if (rt6)
++ /* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
++ * and ip6_null_entry could be set to skb if no route is found.
++ */
++ if (rt6 && rt6->rt6i_idev)
+ dev = rt6->rt6i_idev->dev;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From fa18e0267c199c88a21333e0bd3e8375ce87c3e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 16:07:14 +0200
+Subject: igc: Fix inserting of empty frame for launchtime
+
+From: Florian Kauer <florian.kauer@linutronix.de>
+
+[ Upstream commit 0bcc62858d6ba62cbade957d69745e6adeed5f3d ]
+
+The insertion of an empty frame was introduced with
+commit db0b124f02ba ("igc: Enhance Qbv scheduling by using first flag bit")
+in order to ensure that the current cycle has at least one packet if
+there is some packet to be scheduled for the next cycle.
+
+However, the current implementation does not properly check if
+a packet is already scheduled for the current cycle. Currently,
+an empty packet is always inserted if and only if
+txtime >= end_of_cycle && txtime > last_tx_cycle
+but since last_tx_cycle is always either the end of the current
+cycle (end_of_cycle) or the end of a previous cycle, the
+second part (txtime > last_tx_cycle) is always true unless
+txtime == last_tx_cycle.
+
+What actually needs to be checked here is if the last_tx_cycle
+was already written within the current cycle, so an empty frame
+should only be inserted if and only if
+txtime >= end_of_cycle && end_of_cycle > last_tx_cycle.
+
+This patch does not only avoid an unnecessary insertion: inserting an
+empty packet when packets are already scheduled in the current cycle
+can actually be harmful, because it can lead
+to a situation where the empty packet is processed
+as the first packet of the upcoming cycle, shifting the packet
+with the first_flag one more cycle into the future and finally leading
+to a TX hang.
+
+The TX hang can be reproduced on a i225 with:
+
+ sudo tc qdisc replace dev enp1s0 parent root handle 100 taprio \
+ num_tc 1 \
+ map 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 \
+ queues 1@0 \
+ base-time 0 \
+ sched-entry S 01 300000 \
+ flags 0x1 \
+ txtime-delay 500000 \
+ clockid CLOCK_TAI
+ sudo tc qdisc replace dev enp1s0 parent 100:1 etf \
+ clockid CLOCK_TAI \
+ delta 500000 \
+ offload \
+ skip_sock_check
+
+and traffic generator
+
+ sudo trafgen -i traffic.cfg -o enp1s0 --cpp -n0 -q -t1400ns
+
+with traffic.cfg
+
+ #define ETH_P_IP 0x0800
+
+ {
+ /* Ethernet Header */
+ 0x30, 0x1f, 0x9a, 0xd0, 0xf0, 0x0e, # MAC Dest - adapt as needed
+ 0x24, 0x5e, 0xbe, 0x57, 0x2e, 0x36, # MAC Src - adapt as needed
+ const16(ETH_P_IP),
+
+ /* IPv4 Header */
+ 0b01000101, 0, # IPv4 version, IHL, TOS
+ const16(1028), # IPv4 total length (UDP length + 20 bytes (IP header))
+ const16(2), # IPv4 ident
+ 0b01000000, 0, # IPv4 flags, fragmentation off
+ 64, # IPv4 TTL
+ 17, # Protocol UDP
+ csumip(14, 33), # IPv4 checksum
+
+ /* UDP Header */
+ 10, 0, 48, 1, # IP Src - adapt as needed
+ 10, 0, 48, 10, # IP Dest - adapt as needed
+ const16(5555), # UDP Src Port
+ const16(6666), # UDP Dest Port
+ const16(1008), # UDP length (UDP header 8 bytes + payload length)
+ csumudp(14, 34), # UDP checksum
+
+ /* Payload */
+ fill('W', 1000),
+ }
+
+and the observed message with that is for example
+
+ igc 0000:01:00.0 enp1s0: Detected Tx Unit Hang
+ Tx Queue <0>
+ TDH <32>
+ TDT <3c>
+ next_to_use <3c>
+ next_to_clean <32>
+ buffer_info[next_to_clean]
+ time_stamp <ffff26a8>
+ next_to_watch <00000000632a1828>
+ jiffies <ffff27f8>
+ desc.status <1048000>
+
+Fixes: db0b124f02ba ("igc: Enhance Qbv scheduling by using first flag bit")
+Signed-off-by: Florian Kauer <florian.kauer@linutronix.de>
+Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 743d277896792..273941f90f066 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1027,7 +1027,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ *first_flag = true;
+ ring->last_ff_cycle = baset_est;
+
+- if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
++ if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
+ *insert_empty = true;
+ }
+ }
+--
+2.39.2
+
--- /dev/null
+From c78a2bd7b765701cc9a7a12d7da2e028c4e87bd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jun 2023 16:07:13 +0200
+Subject: igc: Fix launchtime before start of cycle
+
+From: Florian Kauer <florian.kauer@linutronix.de>
+
+[ Upstream commit c1bca9ac0bcb355be11354c2e68bc7bf31f5ac5a ]
+
+It is possible (verified on a running system) that frames are processed
+by igc_tx_launchtime with a txtime before the start of the cycle
+(baset_est).
+
+However, the result of txtime - baset_est is written into a u32,
+leading to a wrap around to a positive number. The following
+launchtime > 0 check will only branch to executing launchtime = 0
+if launchtime is already 0.
+
+Fix it by using a s32 before checking launchtime > 0.
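+
+A standalone illustration of the wrap-around (assumed example values,
+not driver code):
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        int64_t txtime = 1000, baset_est = 5000; /* txtime before cycle start */
+
+        uint32_t lt_u32 = txtime - baset_est;    /* wraps to 0xfffff060 */
+        int32_t  lt_s32 = txtime - baset_est;    /* stays -4000 */
+
+        if (!(lt_u32 > 0))
+            lt_u32 = 0;    /* never taken: the wrapped value looks positive */
+        if (!(lt_s32 > 0))
+            lt_s32 = 0;    /* taken: clamped to 0 as intended */
+
+        printf("u32 launchtime: %u\n", lt_u32);  /* 4294963296, not clamped */
+        printf("s32 launchtime: %d\n", lt_s32);  /* 0 */
+        return 0;
+    }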
+
+Fixes: db0b124f02ba ("igc: Enhance Qbv scheduling by using first flag bit")
+Signed-off-by: Florian Kauer <florian.kauer@linutronix.de>
+Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 93e90c353f1a8..743d277896792 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1014,7 +1014,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ ktime_t base_time = adapter->base_time;
+ ktime_t now = ktime_get_clocktai();
+ ktime_t baset_est, end_of_cycle;
+- u32 launchtime;
++ s32 launchtime;
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+--
+2.39.2
+
--- /dev/null
+From b724b2e932ce9dbc5f882562c96c8e3186e26a77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 12:00:43 +0530
+Subject: igc: Handle PPS start time programming for past time values
+
+From: Aravindhan Gunasekaran <aravindhan.gunasekaran@intel.com>
+
+[ Upstream commit 84a192e46106355de1a314d709e657231d4b1026 ]
+
+I225/6 hardware can be programmed to start PPS output once
+the time in the Target Time registers is reached. The time
+programmed in these registers should always be in the future;
+only then is PPS output triggered when the SYSTIM register
+reaches the programmed value. There are two modes in i225/6
+hardware to program PPS: pulse mode and clock mode.
+
+There were issues reported where PPS is not generated when
+the start time is in the past.
+
+Example 1, "echo 0 0 0 2 0 > /sys/class/ptp/ptp0/period"
+
+In the current implementation, a value of '0' is programmed
+into the Target time registers and PPS output is in pulse mode.
+The interrupt that is triggered when the SYSTIM register
+reaches the Target time is never fired, so no PPS
+output is generated.
+
+Example 2, "echo 0 0 0 1 0 > /sys/class/ptp/ptp0/period"
+
+Above case, a value of '0' is programmed into Target time
+registers and PPS output is in clock mode. Here, HW tries to
+catch-up the current time by incrementing Target Time
+register. This catch-up time seem to vary according to
+programmed PPS period time as per the HW design. In my
+experiments, the delay ranged between few tens of seconds to
+few minutes. The PPS output is only generated after the
+Target time register reaches current time.
+
+In my experiments, I also observed that PPS stopped working with
+the test below and could not recover until the module was removed
+and loaded again.
+
+1) echo 0 <future time> 0 1 0 > /sys/class/ptp/ptp1/period
+2) echo 0 0 0 1 0 > /sys/class/ptp/ptp1/period
+3) echo 0 0 0 1 0 > /sys/class/ptp/ptp1/period
+
+After this, PPS did not work even if I re-programmed it with proper
+values. I could only get it working again by reloading the
+driver.
+
+This patch calculates and programs an appropriate future time
+value into the Target Time registers.
+
+Fixes: 5e91c72e560c ("igc: Fix PPS delta between two synchronized end-points")
+Signed-off-by: Aravindhan Gunasekaran <aravindhan.gunasekaran@intel.com>
+Reviewed-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_ptp.c | 25 +++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 4e10ced736dbb..d96cdccdc1e1e 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -356,16 +356,35 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ tsim &= ~IGC_TSICR_TT0;
+ }
+ if (on) {
++ struct timespec64 safe_start;
+ int i = rq->perout.index;
+
+ igc_pin_perout(igc, i, pin, use_freq);
+- igc->perout[i].start.tv_sec = rq->perout.start.sec;
++ igc_ptp_read(igc, &safe_start);
++
++ /* PPS output start time is triggered by Target time(TT)
++ * register. Programming any past time value into TT
++ * register will cause PPS to never start. Need to make
++ * sure we program the TT register a time ahead in
++ * future. There isn't a stringent need to fire PPS out
++ * right away. Adding +2 seconds should take care of
++ * corner cases. Let's say if the SYSTIML is close to
++ * wrap up and the timer keeps ticking as we program the
++ * register, adding +2seconds is safe bet.
++ */
++ safe_start.tv_sec += 2;
++
++ if (rq->perout.start.sec < safe_start.tv_sec)
++ igc->perout[i].start.tv_sec = safe_start.tv_sec;
++ else
++ igc->perout[i].start.tv_sec = rq->perout.start.sec;
+ igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
+ igc->perout[i].period.tv_sec = ts.tv_sec;
+ igc->perout[i].period.tv_nsec = ts.tv_nsec;
+- wr32(trgttimh, rq->perout.start.sec);
++ wr32(trgttimh, (u32)igc->perout[i].start.tv_sec);
+ /* For now, always select timer 0 as source. */
+- wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
++ wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec |
++ IGC_TT_IO_TIMER_SEL_SYSTIM0));
+ if (use_freq)
+ wr32(freqout, ns);
+ tsauxc |= tsauxc_mask;
+--
+2.39.2
+
--- /dev/null
+From 4b3e566f4663b75cd0ed25bdedce8dd3a45b9133 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 May 2023 08:18:12 +0800
+Subject: igc: Remove delay during TX ring configuration
+
+From: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+
+[ Upstream commit cca28ceac7c7857bc2d313777017585aef00bcc4 ]
+
+Remove the unnecessary delay during TX ring configuration.
+This delay adds latency, especially during link down and
+link up activity.
+
+Furthermore, old SKUs such as I225 will call reset_adapter
+to reset the controller when setting the TSN mode Gate Control
+List (GCL). This adds more time to the configuration of the
+real-time use case.
+
+This delay is not mentioned in the Software User Manual.
+It might have been ported from legacy I210 code in the past.
+
+Fixes: 13b5b7fd6a4a ("igc: Add support for Tx/Rx rings")
+Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Acked-by: Sasha Neftin <sasha.neftin@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index b67a6a81474f5..93e90c353f1a8 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -709,7 +709,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
+ /* disable the queue */
+ wr32(IGC_TXDCTL(reg_idx), 0);
+ wrfl();
+- mdelay(10);
+
+ wr32(IGC_TDLEN(reg_idx),
+ ring->count * sizeof(union igc_adv_tx_desc));
+--
+2.39.2
+
--- /dev/null
+From c828e9fb40babbf0b703cf242797dae92f5060fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 11:09:01 -0700
+Subject: igc: set TP bit in 'supported' and 'advertising' fields of
+ ethtool_link_ksettings
+
+From: Prasad Koya <prasad@arista.com>
+
+[ Upstream commit 9ac3fc2f42e5ffa1e927dcbffb71b15fa81459e2 ]
+
+Set the TP bit in the 'supported' and 'advertising' fields. i225/i226
+parts only support twisted pair copper.
+
+Fixes: 8c5ad0dae93c ("igc: Add ethtool support")
+Signed-off-by: Prasad Koya <prasad@arista.com>
+Acked-by: Sasha Neftin <sasha.neftin@intel.com>
+Tested-by: Naama Meir <naamax.meir@linux.intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_ethtool.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 8cc077b712add..511fc3f412087 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1707,6 +1707,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ /* twisted pair */
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
++ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
++ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ /* advertising link modes */
+ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+--
+2.39.2
+
--- /dev/null
+From 68bda054f4602f04bd6383c1263654613d5c4dca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 11:20:06 -0700
+Subject: ionic: remove WARN_ON to prevent panic_on_warn
+
+From: Nitya Sunkad <nitya.sunkad@amd.com>
+
+[ Upstream commit abfb2a58a5377ebab717d4362d6180f901b6e5c1 ]
+
+Remove an unnecessary check left over from early code development, and
+the WARN_ON that it uses. The irq alloc and free paths have long been
+cleaned up and this check shouldn't have stuck around so long.
+
+Fixes: 77ceb68e29cc ("ionic: Add notifyq support")
+Signed-off-by: Nitya Sunkad <nitya.sunkad@amd.com>
+Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 159bfcc76498c..a89ab455af67d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -462,11 +462,6 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
+ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
+ struct ionic_qcq *n_qcq)
+ {
+- if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
+- ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
+- n_qcq->flags &= ~IONIC_QCQ_F_INTR;
+- }
+-
+ n_qcq->intr.vector = src_qcq->intr.vector;
+ n_qcq->intr.index = src_qcq->intr.index;
+ n_qcq->napi_qcq = src_qcq->napi_qcq;
+--
+2.39.2
+
--- /dev/null
+From 06a02c88d3a087d91221deedbf3c69aea072004c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Jul 2023 14:59:10 +0800
+Subject: ipv6/addrconf: fix a potential refcount underflow for idev
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit 06a0716949c22e2aefb648526580671197151acc ]
+
+Currently, addrconf_mod_rs_timer() takes a reference on idev only if
+rs_timer is not pending, and then modifies the rs_timer timeout.
+
+There is a time gap at [1]: if the pending rs_timer fires in that window
+and becomes not pending, the code misses taking the idev reference even
+though the timer is then re-armed. The rs_timer callback function
+addrconf_rs_timer() will later be executed and put idev without a
+matching hold. This can cause a refcount underflow on idev.
+
+ if (!timer_pending(&idev->rs_timer))
+ in6_dev_hold(idev);
+ <--------------[1]
+ mod_timer(&idev->rs_timer, jiffies + when);
+
+To fix the issue, hold idev if mod_timer() returns 0.
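+
+This works because mod_timer() returns 0 when the timer was inactive
+(not pending) and 1 when it was still pending, so the pending check and
+the re-arming happen atomically inside mod_timer(), closing the window
+at [1]. A sketch mirroring the change below:
+
+    /* take a reference only when no pending timer already holds one */
+    if (!mod_timer(&idev->rs_timer, jiffies + when))
+        in6_dev_hold(idev);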
+
+Fixes: b7b1bfce0bb6 ("ipv6: split duplicate address detection and router solicitation timer")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/addrconf.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index e6c7edcf68343..51bfc74805ecf 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -318,9 +318,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
+ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
+ unsigned long when)
+ {
+- if (!timer_pending(&idev->rs_timer))
++ if (!mod_timer(&idev->rs_timer, jiffies + when))
+ in6_dev_hold(idev);
+- mod_timer(&idev->rs_timer, jiffies + when);
+ }
+
+ static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+--
+2.39.2
+
--- /dev/null
+From 5d488b6f7e845cc9ddf05f0dc69e34bdb3528588 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 07:28:53 +0300
+Subject: kernel/trace: Fix cleanup logic of enable_trace_eprobe
+
+From: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com>
+
+[ Upstream commit cf0a624dc706c306294c14e6b3e7694702f25191 ]
+
+The enable_trace_eprobe() function enables all event probes attached
+to a given trace probe. If an error occurs while enabling one of the
+event probes, all the others should be rolled back. There is a bug in
+that rollback logic - instead of all event probes, only the failed one
+is disabled.
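+
+The fix counts how many probes were actually enabled and disables
+exactly that many on failure. A simplified, self-contained illustration
+of the pattern (struct item, enable_one() and disable_one() are
+stand-ins, not kernel APIs):
+
+ /* Stand-alone illustration of the rollback pattern (not kernel code). */
+ struct item { int enabled; };
+
+ static int enable_one(struct item *it)   { it->enabled = 1; return 0; }
+ static void disable_one(struct item *it) { it->enabled = 0; }
+
+ static int enable_all(struct item *items, int n)
+ {
+         int i, err = 0, cnt = 0;
+
+         for (i = 0; i < n; i++) {
+                 err = enable_one(&items[i]);
+                 if (err)
+                         break;
+                 cnt++;
+         }
+         if (err) {
+                 /* disable only the 'cnt' items that were actually enabled;
+                  * the one that failed was never enabled
+                  */
+                 for (i = 0; i < cnt; i++)
+                         disable_one(&items[i]);
+         }
+         return err;
+ }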
+
+Link: https://lore.kernel.org/all/20230703042853.1427493-1-tz.stoyanov@gmail.com/
+
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events")
+Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_eprobe.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
+index 753fc536525d3..d2370cdb4c1d6 100644
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -743,6 +743,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
+ struct trace_eprobe *ep;
+ bool enabled;
+ int ret = 0;
++ int cnt = 0;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+@@ -766,12 +767,25 @@ static int enable_trace_eprobe(struct trace_event_call *call,
+ if (ret)
+ break;
+ enabled = true;
++ cnt++;
+ }
+
+ if (ret) {
+ /* Failed to enable one of them. Roll back all */
+- if (enabled)
+- disable_eprobe(ep, file->tr);
++ if (enabled) {
++ /*
++ * It's a bug if one failed for something other than memory
++ * not being available but another eprobe succeeded.
++ */
++ WARN_ON_ONCE(ret != -ENOMEM);
++
++ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
++ ep = container_of(pos, struct trace_eprobe, tp);
++ disable_eprobe(ep, file->tr);
++ if (!--cnt)
++ break;
++ }
++ }
+ if (file)
+ trace_probe_remove_file(tp, file);
+ else
+--
+2.39.2
+
--- /dev/null
+From da8ab60d6a75bf12b663bdf6ddea7b72715d126b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jul 2023 08:53:25 +0200
+Subject: net: bgmac: postpone turning IRQs off to avoid SoC hangs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafał Miłecki <rafal@milecki.pl>
+
+[ Upstream commit e7731194fdf085f46d58b1adccfddbd0dfee4873 ]
+
+Turning IRQs off is done by accessing Ethernet controller registers.
+That can't be done until the device's clock is enabled; otherwise it
+results in a SoC hang.
+
+This bug remained unnoticed for years as most bootloaders keep all
+Ethernet interfaces turned on. It seems to only affect a niche SoC
+family BCM47189. It has two Ethernet controllers but CFE bootloader uses
+only the first one.
+
+Fixes: 34322615cbaa ("net: bgmac: Mask interrupts during probe")
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 1761df8fb7f96..10c7c232cc4ec 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1492,8 +1492,6 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+
+ bgmac->in_init = true;
+
+- bgmac_chip_intrs_off(bgmac);
+-
+ net_dev->irq = bgmac->irq;
+ SET_NETDEV_DEV(net_dev, bgmac->dev);
+ dev_set_drvdata(bgmac->dev, bgmac);
+@@ -1511,6 +1509,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ */
+ bgmac_clk_enable(bgmac, 0);
+
++ bgmac_chip_intrs_off(bgmac);
++
+ /* This seems to be fixing IRQ by assigning OOB #6 to the core */
+ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+--
+2.39.2
+
--- /dev/null
+From 0b62a31f05b0a2b6cac70f3ba28bc534cc13aef7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 09:39:07 +0800
+Subject: net: dsa: qca8k: Add check for skb_copy
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 87355b7c3da9bfd81935caba0ab763355147f7b0 ]
+
+Add a check for the return value of skb_copy() in order to avoid a NULL
+pointer dereference.
+
+Fixes: 2cd548566384 ("net: dsa: qca8k: add support for phy read/write with mgmt Ethernet")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/qca/qca8k-8xxx.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index 7a6166a0c9bcc..b3f7988668996 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -469,6 +469,9 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ bool ack;
+ int ret;
+
++ if (!skb)
++ return -ENOMEM;
++
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+--
+2.39.2
+
--- /dev/null
+From dd99e55563cb3069d81b733806d4d7616408a27d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 09:32:10 +0200
+Subject: net/mlx5e: Check for NOT_READY flag state after locking
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit 65e64640e97c0f223e77f9ea69b5a46186b93470 ]
+
+Currently the check for the NOT_READY flag is performed before obtaining
+the necessary lock. This opens a window for a race condition when the
+flow is concurrently removed from the unready_flows list by the
+workqueue task, which causes a double-removal from the list and a
+crash[0]. Fix the issue
+by moving the flag check inside the section protected by
+uplink_priv->unready_flows_lock mutex.
+
+[0]:
+[44376.389654] general protection fault, probably for non-canonical address 0xdead000000000108: 0000 [#1] SMP
+[44376.391665] CPU: 7 PID: 59123 Comm: tc Not tainted 6.4.0-rc4+ #1
+[44376.392984] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[44376.395342] RIP: 0010:mlx5e_tc_del_fdb_flow+0xb3/0x340 [mlx5_core]
+[44376.396857] Code: 00 48 8b b8 68 ce 02 00 e8 8a 4d 02 00 4c 8d a8 a8 01 00 00 4c 89 ef e8 8b 79 88 e1 48 8b 83 98 06 00 00 48 8b 93 90 06 00 00 <48> 89 42 08 48 89 10 48 b8 00 01 00 00 00 00 ad de 48 89 83 90 06
+[44376.399167] RSP: 0018:ffff88812cc97570 EFLAGS: 00010246
+[44376.399680] RAX: dead000000000122 RBX: ffff8881088e3800 RCX: ffff8881881bac00
+[44376.400337] RDX: dead000000000100 RSI: ffff88812cc97500 RDI: ffff8881242f71b0
+[44376.401001] RBP: ffff88811cbb0940 R08: 0000000000000400 R09: 0000000000000001
+[44376.401663] R10: 0000000000000001 R11: 0000000000000000 R12: ffff88812c944000
+[44376.402342] R13: ffff8881242f71a8 R14: ffff8881222b4000 R15: 0000000000000000
+[44376.402999] FS: 00007f0451104800(0000) GS:ffff88852cb80000(0000) knlGS:0000000000000000
+[44376.403787] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[44376.404343] CR2: 0000000000489108 CR3: 0000000123a79003 CR4: 0000000000370ea0
+[44376.405004] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[44376.405665] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[44376.406339] Call Trace:
+[44376.406651] <TASK>
+[44376.406939] ? die_addr+0x33/0x90
+[44376.407311] ? exc_general_protection+0x192/0x390
+[44376.407795] ? asm_exc_general_protection+0x22/0x30
+[44376.408292] ? mlx5e_tc_del_fdb_flow+0xb3/0x340 [mlx5_core]
+[44376.408876] __mlx5e_tc_del_fdb_peer_flow+0xbc/0xe0 [mlx5_core]
+[44376.409482] mlx5e_tc_del_flow+0x42/0x210 [mlx5_core]
+[44376.410055] mlx5e_flow_put+0x25/0x50 [mlx5_core]
+[44376.410529] mlx5e_delete_flower+0x24b/0x350 [mlx5_core]
+[44376.411043] tc_setup_cb_reoffload+0x22/0x80
+[44376.411462] fl_reoffload+0x261/0x2f0 [cls_flower]
+[44376.411907] ? mlx5e_rep_indr_setup_ft_cb+0x160/0x160 [mlx5_core]
+[44376.412481] ? mlx5e_rep_indr_setup_ft_cb+0x160/0x160 [mlx5_core]
+[44376.413044] tcf_block_playback_offloads+0x76/0x170
+[44376.413497] tcf_block_unbind+0x7b/0xd0
+[44376.413881] tcf_block_setup+0x17d/0x1c0
+[44376.414269] tcf_block_offload_cmd.isra.0+0xf1/0x130
+[44376.414725] tcf_block_offload_unbind+0x43/0x70
+[44376.415153] __tcf_block_put+0x82/0x150
+[44376.415532] ingress_destroy+0x22/0x30 [sch_ingress]
+[44376.415986] qdisc_destroy+0x3b/0xd0
+[44376.416343] qdisc_graft+0x4d0/0x620
+[44376.416706] tc_get_qdisc+0x1c9/0x3b0
+[44376.417074] rtnetlink_rcv_msg+0x29c/0x390
+[44376.419978] ? rep_movs_alternative+0x3a/0xa0
+[44376.420399] ? rtnl_calcit.isra.0+0x120/0x120
+[44376.420813] netlink_rcv_skb+0x54/0x100
+[44376.421192] netlink_unicast+0x1f6/0x2c0
+[44376.421573] netlink_sendmsg+0x232/0x4a0
+[44376.421980] sock_sendmsg+0x38/0x60
+[44376.422328] ____sys_sendmsg+0x1d0/0x1e0
+[44376.422709] ? copy_msghdr_from_user+0x6d/0xa0
+[44376.423127] ___sys_sendmsg+0x80/0xc0
+[44376.423495] ? ___sys_recvmsg+0x8b/0xc0
+[44376.423869] __sys_sendmsg+0x51/0x90
+[44376.424226] do_syscall_64+0x3d/0x90
+[44376.424587] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+[44376.425046] RIP: 0033:0x7f045134f887
+[44376.425403] Code: 0a 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+[44376.426914] RSP: 002b:00007ffd63a82b98 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+[44376.427592] RAX: ffffffffffffffda RBX: 000000006481955f RCX: 00007f045134f887
+[44376.428195] RDX: 0000000000000000 RSI: 00007ffd63a82c00 RDI: 0000000000000003
+[44376.428796] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
+[44376.429404] R10: 00007f0451208708 R11: 0000000000000246 R12: 0000000000000001
+[44376.430039] R13: 0000000000409980 R14: 000000000047e538 R15: 0000000000485400
+[44376.430644] </TASK>
+[44376.430907] Modules linked in: mlx5_ib mlx5_core act_mirred act_tunnel_key cls_flower vxlan dummy sch_ingress openvswitch nsh rpcrdma rdma_ucm ib_iser libiscsi scsi_transport_iscsi ib_umad rdma_cm ib_ipoib iw_cm ib_cm ib_uverbs ib_core xt_conntrack xt_MASQUERADE nf_conntrack_netlink nfnetlink xt_addrtype iptable_nat nf_nat br_netfilter rpcsec_g
+ss_krb5 auth_rpcgss oid_registry overlay zram zsmalloc fuse [last unloaded: mlx5_core]
+[44376.433936] ---[ end trace 0000000000000000 ]---
+[44376.434373] RIP: 0010:mlx5e_tc_del_fdb_flow+0xb3/0x340 [mlx5_core]
+[44376.434951] Code: 00 48 8b b8 68 ce 02 00 e8 8a 4d 02 00 4c 8d a8 a8 01 00 00 4c 89 ef e8 8b 79 88 e1 48 8b 83 98 06 00 00 48 8b 93 90 06 00 00 <48> 89 42 08 48 89 10 48 b8 00 01 00 00 00 00 ad de 48 89 83 90 06
+[44376.436452] RSP: 0018:ffff88812cc97570 EFLAGS: 00010246
+[44376.436924] RAX: dead000000000122 RBX: ffff8881088e3800 RCX: ffff8881881bac00
+[44376.437530] RDX: dead000000000100 RSI: ffff88812cc97500 RDI: ffff8881242f71b0
+[44376.438179] RBP: ffff88811cbb0940 R08: 0000000000000400 R09: 0000000000000001
+[44376.438786] R10: 0000000000000001 R11: 0000000000000000 R12: ffff88812c944000
+[44376.439393] R13: ffff8881242f71a8 R14: ffff8881222b4000 R15: 0000000000000000
+[44376.439998] FS: 00007f0451104800(0000) GS:ffff88852cb80000(0000) knlGS:0000000000000000
+[44376.440714] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[44376.441225] CR2: 0000000000489108 CR3: 0000000123a79003 CR4: 0000000000370ea0
+[44376.441843] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[44376.442471] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+
+Fixes: ad86755b18d5 ("net/mlx5e: Protect unready flows with dedicated lock")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Reviewed-by: Roi Dayan <roid@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 7883b625634fb..7ab489520a873 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1551,7 +1551,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_lock(&uplink_priv->unready_flows_lock);
+- unready_flow_del(flow);
++ if (flow_flag_test(flow, NOT_READY))
++ unready_flow_del(flow);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
+ }
+
+@@ -1896,8 +1897,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ esw_attr = attr->esw_attr;
+ mlx5e_put_flow_tunnel_id(flow);
+
+- if (flow_flag_test(flow, NOT_READY))
+- remove_unready_flow(flow);
++ remove_unready_flow(flow);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
+ if (flow_flag_test(flow, SLOW))
+--
+2.39.2
+
--- /dev/null
+From 1a52206069716e094edbfdac1625ab8dae1f2a95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jun 2023 08:59:34 +0800
+Subject: net/mlx5e: fix double free in mlx5e_destroy_flow_table
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 884abe45a9014d0de2e6edb0630dfd64f23f1d1b ]
+
+In accel_fs_tcp_create_groups(), when the ft->g memory is successfully
+allocated but the 'in' allocation fails, the memory pointed to by ft->g
+is freed once. Then, in accel_fs_tcp_create_table(),
+mlx5e_destroy_flow_table() is called and frees the memory pointed to by
+ft->g again. This causes a double-free problem. Fix it by setting ft->g
+to NULL after freeing it.
+
+Fixes: c062d52ac24c ("net/mlx5e: Receive flow steering framework for accelerated TCP flows")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+index d7c020f724013..06c47404996bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
++ ft->g = NULL;
+ kvfree(in);
+ return -ENOMEM;
+ }
+--
+2.39.2
+
--- /dev/null
+From 8cc6b4a6532b8b77d5c94084ce45a67a4c323683 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jun 2023 09:49:02 +0800
+Subject: net/mlx5e: fix memory leak in mlx5e_fs_tt_redirect_any_create
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 3250affdc658557a41df9c5fb567723e421f8bf2 ]
+
+The memory pointed to by the fs->any pointer is not freed in the error
+path of mlx5e_fs_tt_redirect_any_create, which can lead to a memory leak.
+Fix by freeing the memory in the error path, thereby making the error path
+identical to mlx5e_fs_tt_redirect_any_destroy().
+
+Fixes: 0f575c20bf06 ("net/mlx5e: Introduce Flow Steering ANY API")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index 03cb79adf912f..be83ad9db82a4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
+
+ err = fs_any_create_table(fs);
+ if (err)
+- return err;
++ goto err_free_any;
+
+ err = fs_any_enable(fs);
+ if (err)
+@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
+
+ err_destroy_table:
+ fs_any_destroy_table(fs_any);
+-
+- kfree(fs_any);
++err_free_any:
+ mlx5e_fs_set_any(fs, NULL);
++ kfree(fs_any);
+ return err;
+ }
+--
+2.39.2
+
--- /dev/null
+From f41a1e782a6b2f77ad0f34e3be65080bfb49280c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jun 2023 09:49:03 +0800
+Subject: net/mlx5e: fix memory leak in mlx5e_ptp_open
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit d543b649ffe58a0cb4b6948b3305069c5980a1fa ]
+
+When kvzalloc_node() or kvzalloc() fails in mlx5e_ptp_open(), the memory
+pointed to by "c" or "cparams" is not freed, which can lead to a memory
+leak. Fix by freeing both allocations in the error path.
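+
+A minimal sketch of the resulting error handling (err_free stands for
+the function's existing common cleanup; kvfree(NULL) is a no-op, so both
+pointers can be freed unconditionally):
+
+ 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL,
+ 			  dev_to_node(mlx5_core_dma_dev(mdev)));
+ 	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
+ 	if (!c || !cparams) {
+ 		err = -ENOMEM;
+ 		goto err_free;		/* instead of returning -ENOMEM directly */
+ 	}
+
+ 	/* channel setup elided; error paths converge below */
+
+ err_free:
+ 	kvfree(cparams);	/* kvfree(NULL) is a no-op */
+ 	kvfree(c);
+ 	return err;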
+
+Fixes: 145e5637d941 ("net/mlx5e: Add TX PTP port object support")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Reviewed-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index efd02ce4425de..72b4781f0eb2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
+ cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
+- if (!c || !cparams)
+- return -ENOMEM;
++ if (!c || !cparams) {
++ err = -ENOMEM;
++ goto err_free;
++ }
+
+ c->priv = priv;
+ c->mdev = priv->mdev;
+--
+2.39.2
+
--- /dev/null
+From 3a44359fa0de0180914a76734b0335790d75fc61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 07:37:12 +0200
+Subject: net: mvneta: fix txq_map in case of txq_number==1
+
+From: Klaus Kudielka <klaus.kudielka@gmail.com>
+
+[ Upstream commit 21327f81db6337c8843ce755b01523c7d3df715b ]
+
+If we boot with mvneta.txq_number=1, the txq_map is set incorrectly:
+MVNETA_CPU_TXQ_ACCESS(1) refers to TX queue 1, but only TX queue 0 is
+initialized. Fix this.
+
+Fixes: 50bf8cb6fc9c ("net: mvneta: Configure XPS support")
+Signed-off-by: Klaus Kudielka <klaus.kudielka@gmail.com>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Link: https://lore.kernel.org/r/20230705053712.3914-1-klaus.kudielka@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5aefaaff08711..aca5b72cfeec6 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1505,7 +1505,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ */
+ if (txq_number == 1)
+ txq_map = (cpu == pp->rxq_def) ?
+- MVNETA_CPU_TXQ_ACCESS(1) : 0;
++ MVNETA_CPU_TXQ_ACCESS(0) : 0;
+
+ } else {
+ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+@@ -4294,7 +4294,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
+ */
+ if (txq_number == 1)
+ txq_map = (cpu == elected_cpu) ?
+- MVNETA_CPU_TXQ_ACCESS(1) : 0;
++ MVNETA_CPU_TXQ_ACCESS(0) : 0;
+ else
+ txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+ MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+--
+2.39.2
+
--- /dev/null
+From a12226bbf0255142f4f700edb13391af63e6fdef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jul 2023 10:11:10 +0200
+Subject: net: prevent skb corruption on frag list segmentation
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit c329b261afe71197d9da83c1f18eb45a7e97e089 ]
+
+Ian reported several skb corruptions triggered by rx-gro-list,
+collected as several similar oopses like the following:
+
+[ 62.624003] BUG: kernel NULL pointer dereference, address: 00000000000000c0
+[ 62.631083] #PF: supervisor read access in kernel mode
+[ 62.636312] #PF: error_code(0x0000) - not-present page
+[ 62.641541] PGD 0 P4D 0
+[ 62.644174] Oops: 0000 [#1] PREEMPT SMP NOPTI
+[ 62.648629] CPU: 1 PID: 913 Comm: napi/eno2-79 Not tainted 6.4.0 #364
+[ 62.655162] Hardware name: Supermicro Super Server/A2SDi-12C-HLN4F, BIOS 1.7a 10/13/2022
+[ 62.663344] RIP: 0010:__udp_gso_segment (./include/linux/skbuff.h:2858
+./include/linux/udp.h:23 net/ipv4/udp_offload.c:228 net/ipv4/udp_offload.c:261
+net/ipv4/udp_offload.c:277)
+[ 62.687193] RSP: 0018:ffffbd3a83b4f868 EFLAGS: 00010246
+[ 62.692515] RAX: 00000000000000ce RBX: 0000000000000000 RCX: 0000000000000000
+[ 62.699743] RDX: ffffa124def8a000 RSI: 0000000000000079 RDI: ffffa125952a14d4
+[ 62.706970] RBP: ffffa124def8a000 R08: 0000000000000022 R09: 00002000001558c9
+[ 62.714199] R10: 0000000000000000 R11: 00000000be554639 R12: 00000000000000e2
+[ 62.721426] R13: ffffa125952a1400 R14: ffffa125952a1400 R15: 00002000001558c9
+[ 62.728654] FS: 0000000000000000(0000) GS:ffffa127efa40000(0000)
+knlGS:0000000000000000
+[ 62.736852] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 62.742702] CR2: 00000000000000c0 CR3: 00000001034b0000 CR4: 00000000003526e0
+[ 62.749948] Call Trace:
+[ 62.752498] <TASK>
+[ 62.779267] inet_gso_segment (net/ipv4/af_inet.c:1398)
+[ 62.787605] skb_mac_gso_segment (net/core/gro.c:141)
+[ 62.791906] __skb_gso_segment (net/core/dev.c:3403 (discriminator 2))
+[ 62.800492] validate_xmit_skb (./include/linux/netdevice.h:4862
+net/core/dev.c:3659)
+[ 62.804695] validate_xmit_skb_list (net/core/dev.c:3710)
+[ 62.809158] sch_direct_xmit (net/sched/sch_generic.c:330)
+[ 62.813198] __dev_queue_xmit (net/core/dev.c:3805 net/core/dev.c:4210)
+net/netfilter/core.c:626)
+[ 62.821093] br_dev_queue_push_xmit (net/bridge/br_forward.c:55)
+[ 62.825652] maybe_deliver (net/bridge/br_forward.c:193)
+[ 62.829420] br_flood (net/bridge/br_forward.c:233)
+[ 62.832758] br_handle_frame_finish (net/bridge/br_input.c:215)
+[ 62.837403] br_handle_frame (net/bridge/br_input.c:298
+net/bridge/br_input.c:416)
+[ 62.851417] __netif_receive_skb_core.constprop.0 (net/core/dev.c:5387)
+[ 62.866114] __netif_receive_skb_list_core (net/core/dev.c:5570)
+[ 62.871367] netif_receive_skb_list_internal (net/core/dev.c:5638
+net/core/dev.c:5727)
+[ 62.876795] napi_complete_done (./include/linux/list.h:37
+./include/net/gro.h:434 ./include/net/gro.h:429 net/core/dev.c:6067)
+[ 62.881004] ixgbe_poll (drivers/net/ethernet/intel/ixgbe/ixgbe_main.c:3191)
+[ 62.893534] __napi_poll (net/core/dev.c:6498)
+[ 62.897133] napi_threaded_poll (./include/linux/netpoll.h:89
+net/core/dev.c:6640)
+[ 62.905276] kthread (kernel/kthread.c:379)
+[ 62.913435] ret_from_fork (arch/x86/entry/entry_64.S:314)
+[ 62.917119] </TASK>
+
+In the critical scenario, rx-gro-list GRO-ed packets are fed, via a
+bridge, both to the local input path and to an egress device (tun).
+
+The segmentation of such packets unsafely writes to the cloned skbs
+with shared heads.
+
+This change addresses the issue by uncloning as needed the
+to-be-segmented skbs.
+
+Reported-by: Ian Kumlien <ian.kumlien@gmail.com>
+Tested-by: Ian Kumlien <ian.kumlien@gmail.com>
+Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index ef9772b12624c..b6c16db86c719 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4042,6 +4042,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+
+ skb_push(skb, -skb_network_offset(skb) + offset);
+
++ /* Ensure the head is writeable before touching the shared info */
++ err = skb_unclone(skb, GFP_ATOMIC);
++ if (err)
++ goto err_linearize;
++
+ skb_shinfo(skb)->frag_list = NULL;
+
+ while (list_skb) {
+--
+2.39.2
+
--- /dev/null
+From 95caf069a15de66a6cb13f7400f78f950b1876bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 12:15:30 -0400
+Subject: net/sched: cls_fw: Fix improper refcount update leads to
+ use-after-free
+
+From: M A Ramdhan <ramdhan@starlabs.sg>
+
+[ Upstream commit 0323bce598eea038714f941ce2b22541c46d488f ]
+
+In the event of a failure in tcf_change_indev(), fw_set_parms() will
+immediately return an error after the reference counter has already
+been incremented or decremented in tcf_bind_filter(). If an attacker
+can drive that reference counter to zero this way, the referenced
+object gets freed, leading to a use-after-free.
+
+In order to prevent this, move the point of possible failure above the
+point where the TC_FW_CLASSID is handled.
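+
+A simplified sketch of the resulting ordering in fw_set_parms()
+(mirroring the diff below): every step that can fail runs first, and
+only then is the bind, whose side effect is hard to undo, performed.
+
+ 	/* 1) all fallible setup first */
+ 	if (tb[TCA_FW_INDEV]) {
+ 		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
+ 		if (ret < 0)
+ 			return ret;	/* nothing to unwind yet */
+ 		f->ifindex = ret;
+ 	}
+
+ 	/* 2) only then take the reference via the bind */
+ 	if (tb[TCA_FW_CLASSID]) {
+ 		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
+ 		tcf_bind_filter(tp, &f->res, base);
+ 	}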
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: M A Ramdhan <ramdhan@starlabs.sg>
+Signed-off-by: M A Ramdhan <ramdhan@starlabs.sg>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Message-ID: <20230705161530.52003-1-ramdhan@starlabs.sg>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_fw.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
+index a32351da968cd..1212b057b129c 100644
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -210,11 +210,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
+ if (err < 0)
+ return err;
+
+- if (tb[TCA_FW_CLASSID]) {
+- f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
+- tcf_bind_filter(tp, &f->res, base);
+- }
+-
+ if (tb[TCA_FW_INDEV]) {
+ int ret;
+ ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
+@@ -231,6 +226,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
+ } else if (head->mask != 0xFFFFFFFF)
+ return err;
+
++ if (tb[TCA_FW_CLASSID]) {
++ f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
++ tcf_bind_filter(tp, &f->res, base);
++ }
++
+ return 0;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 67552a91b905352bf7d5e8c7b0780980d459ab88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 10:08:09 +0300
+Subject: net/sched: flower: Ensure both minimum and maximum ports are
+ specified
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit d3f87278bcb80bd7f9519669d928b43320363d4f ]
+
+The kernel does not currently validate that both the minimum and maximum
+ports of a port range are specified. This can lead user space to think
+that a filter matching on a port range was successfully added, when in
+fact it was not. For example, with a patched (buggy) iproute2 that only
+sends the minimum port, the following commands do not return an error:
+
+ # tc filter add dev swp1 ingress pref 1 proto ip flower ip_proto udp src_port 100-200 action pass
+
+ # tc filter add dev swp1 ingress pref 1 proto ip flower ip_proto udp dst_port 100-200 action pass
+
+ # tc filter show dev swp1 ingress
+ filter protocol ip pref 1 flower chain 0
+ filter protocol ip pref 1 flower chain 0 handle 0x1
+ eth_type ipv4
+ ip_proto udp
+ not_in_hw
+ action order 1: gact action pass
+ random type none pass val 0
+ index 1 ref 1 bind 1
+
+ filter protocol ip pref 1 flower chain 0 handle 0x2
+ eth_type ipv4
+ ip_proto udp
+ not_in_hw
+ action order 1: gact action pass
+ random type none pass val 0
+ index 2 ref 1 bind 1
+
+Fix by returning an error unless both ports are specified:
+
+ # tc filter add dev swp1 ingress pref 1 proto ip flower ip_proto udp src_port 100-200 action pass
+ Error: Both min and max source ports must be specified.
+ We have an error talking to the kernel
+
+ # tc filter add dev swp1 ingress pref 1 proto ip flower ip_proto udp dst_port 100-200 action pass
+ Error: Both min and max destination ports must be specified.
+ We have an error talking to the kernel
+
+Fixes: 5c72299fba9d ("net: sched: cls_flower: Classify packets using port ranges")
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_flower.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 3de72e7c1075a..10e6ec0f94981 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -793,6 +793,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+ TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
+
++ if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
++ NL_SET_ERR_MSG(extack,
++ "Both min and max destination ports must be specified");
++ return -EINVAL;
++ }
++ if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
++ NL_SET_ERR_MSG(extack,
++ "Both min and max source ports must be specified");
++ return -EINVAL;
++ }
+ if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+ ntohs(key->tp_range.tp_max.dst) <=
+ ntohs(key->tp_range.tp_min.dst)) {
+--
+2.39.2
+
--- /dev/null
+From 41607ef176aa6977f473d5dbbe08d97e2981ffb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 23:16:34 -0300
+Subject: net/sched: make psched_mtu() RTNL-less safe
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 150e33e62c1fa4af5aaab02776b6c3812711d478 ]
+
+Eric Dumazet says[1]:
+-------
+Speaking of psched_mtu(), I see that net/sched/sch_pie.c is using it
+without holding RTNL, so dev->mtu can be changed underneath.
+KCSAN could issue a warning.
+-------
+
+Annotate the read of dev->mtu with READ_ONCE() so KCSAN doesn't issue a
+warning.
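+
+For context, a sketch of the pairing this relies on; the writer side
+lives elsewhere in the core (e.g. the dev_set_mtu() path) and is shown
+only for illustration:
+
+ 	/* writer side (illustrative) */
+ 	WRITE_ONCE(dev->mtu, new_mtu);
+
+ 	/* lockless reader side, what this patch changes */
+ 	return READ_ONCE(dev->mtu) + dev->hard_header_len;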
+
+[1] https://lore.kernel.org/all/CANn89iJoJO5VtaJ-2=_d2aOQhb0Xw8iBT_Cxqp2HyuS-zj6azw@mail.gmail.com/
+
+v1 -> v2: Fix commit message
+
+Fixes: d4b36210c2e6 ("net: pkt_sched: PIE AQM scheme")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230711021634.561598-1-pctammela@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/pkt_sched.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 8ab75128512ab..f99a513b40a92 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -135,7 +135,7 @@ extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+ */
+ static inline unsigned int psched_mtu(const struct net_device *dev)
+ {
+- return dev->mtu + dev->hard_header_len;
++ return READ_ONCE(dev->mtu) + dev->hard_header_len;
+ }
+
+ static inline struct net *qdisc_net(struct Qdisc *q)
+--
+2.39.2
+
--- /dev/null
+From 8c9c1a61f10391f067bceaeea6253f63f87e416e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 18:01:02 -0300
+Subject: net/sched: sch_qfq: account for stab overhead in qfq_enqueue
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 3e337087c3b5805fe0b8a46ba622a962880b5d64 ]
+
+Lion says:
+-------
+In the QFQ scheduler a similar issue to CVE-2023-31436
+persists.
+
+Consider the following code in net/sched/sch_qfq.c:
+
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ unsigned int len = qdisc_pkt_len(skb), gso_segs;
+
+ // ...
+
+ if (unlikely(cl->agg->lmax < len)) {
+ pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+ cl->agg->lmax, len, cl->common.classid);
+ err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
+ if (err) {
+ cl->qstats.drops++;
+ return qdisc_drop(skb, sch, to_free);
+ }
+
+ // ...
+
+ }
+
+Similarly to CVE-2023-31436, "lmax" is increased without any bounds
+checks according to the packet length "len". Usually this would not
+impose a problem because packet sizes are naturally limited.
+
+This is however not the actual packet length, rather the
+"qdisc_pkt_len(skb)" which might apply size transformations according to
+"struct qdisc_size_table" as created by "qdisc_get_stab()" in
+net/sched/sch_api.c if the TCA_STAB option was set when modifying the qdisc.
+
+A user may choose virtually any size using such a table.
+
+As a result the same issue as in CVE-2023-31436 can occur, allowing heap
+out-of-bounds read / writes in the kmalloc-8192 cache.
+-------
+
+We can create the issue with the following commands:
+
+tc qdisc add dev $DEV root handle 1: stab mtu 2048 tsize 512 mpu 0 \
+overhead 999999999 linklayer ethernet qfq
+tc class add dev $DEV parent 1: classid 1:1 htb rate 6mbit burst 15k
+tc filter add dev $DEV parent 1: matchall classid 1:1
+ping -I $DEV 1.1.1.2
+
+This is caused by incorrectly assuming that qdisc_pkt_len() returns a
+length within the range QFQ_MIN_LMAX < len < QFQ_MAX_LMAX.
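+
+For context, with a size table attached the scheduler-visible length is
+roughly computed as below (simplified sketch of __qdisc_calculate_pkt_len();
+the cell-alignment step is omitted):
+
+ 	/* the user-supplied stab inflates the length seen by the qdisc */
+ 	pkt_len = skb->len + stab->szopts.overhead;	/* e.g. overhead 999999999 */
+ 	qdisc_skb_cb(skb)->pkt_len = pkt_len;		/* later read via qdisc_pkt_len() */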
+
+Fixes: 462dbc9101ac ("pkt_sched: QFQ Plus: fair-queueing service at DRR cost")
+Reported-by: Lion <nnamrec@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_qfq.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 2f3629c851584..d5610e145da20 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -381,8 +381,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
+ u32 lmax)
+ {
+ struct qfq_sched *q = qdisc_priv(sch);
+- struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
++ struct qfq_aggregate *new_agg;
+
++ /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
++ if (lmax > QFQ_MAX_LMAX)
++ return -EINVAL;
++
++ new_agg = qfq_find_agg(q, lmax, weight);
+ if (new_agg == NULL) { /* create new aggregate */
+ new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
+ if (new_agg == NULL)
+--
+2.39.2
+
--- /dev/null
+From 1f3f2e2d045f81f44d68e737993e020a3756de39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Apr 2023 12:56:11 -0300
+Subject: net/sched: sch_qfq: refactor parsing of netlink parameters
+
+From: Pedro Tammela <pctammela@mojatatu.com>
+
+[ Upstream commit 25369891fcef373540f8b4e0b3bccf77a04490d5 ]
+
+Two parameters can be transformed into netlink policies and
+validated while parsing the netlink message.
+
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 3e337087c3b5 ("net/sched: sch_qfq: account for stab overhead in qfq_enqueue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_qfq.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 02098a02943eb..2f3629c851584 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -113,6 +113,7 @@
+
+ #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
+ #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
++#define QFQ_MAX_LMAX (1UL << QFQ_MTU_SHIFT)
+
+ #define QFQ_MAX_AGG_CLASSES 8 /* max num classes per aggregate allowed */
+
+@@ -214,9 +215,14 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ return container_of(clc, struct qfq_class, common);
+ }
+
++static struct netlink_range_validation lmax_range = {
++ .min = QFQ_MIN_LMAX,
++ .max = QFQ_MAX_LMAX,
++};
++
+ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
+- [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
+- [TCA_QFQ_LMAX] = { .type = NLA_U32 },
++ [TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
++ [TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
+ };
+
+ /*
+@@ -408,17 +414,13 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ }
+
+ err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
+- qfq_policy, NULL);
++ qfq_policy, extack);
+ if (err < 0)
+ return err;
+
+- if (tb[TCA_QFQ_WEIGHT]) {
++ if (tb[TCA_QFQ_WEIGHT])
+ weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
+- if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
+- pr_notice("qfq: invalid weight %u\n", weight);
+- return -EINVAL;
+- }
+- } else
++ else
+ weight = 1;
+
+ if (tb[TCA_QFQ_LMAX])
+@@ -426,11 +428,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ else
+ lmax = psched_mtu(qdisc_dev(sch));
+
+- if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+- pr_notice("qfq: invalid max length %u\n", lmax);
+- return -EINVAL;
+- }
+-
+ inv_w = ONE_FP / weight;
+ weight = ONE_FP / inv_w;
+
+--
+2.39.2
+
--- /dev/null
+From 6f2ca3c8c1a7fda55fd52f9bb6deb07dd480418a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Jul 2023 11:52:26 +0300
+Subject: netdevsim: fix uninitialized data in nsim_dev_trap_fa_cookie_write()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit f72207a5c0dbaaf6921cf9a6c0d2fd0bc249ea78 ]
+
+The simple_write_to_buffer() function is designed to handle partial
+writes. It returns negatives on error, otherwise it returns the number
+of bytes that it was able to copy. This code doesn't check the return
+value properly, so we only know that the first byte was written; the
+rest of the buffer might be uninitialized.
+
+There is no need to use the simple_write_to_buffer() function.
+Partial writes are prohibited by the "if (*ppos != 0)" check at the
+start of the function. Just use memdup_user() and copy the whole
+buffer.
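+
+A minimal sketch of the resulting pattern (the errno on the ppos check
+is illustrative; see the diff for the actual handler):
+
+ 	/* the handler already rejects partial writes up front */
+ 	if (*ppos != 0)
+ 		return -EINVAL;
+
+ 	/* copy the whole user buffer in one go */
+ 	buf = memdup_user(data, count);
+ 	if (IS_ERR(buf))
+ 		return PTR_ERR(buf);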
+
+Fixes: d3cbb907ae57 ("netdevsim: add ACL trap reporting cookie as a metadata")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://lore.kernel.org/r/7c1f950b-3a7d-4252-82a6-876e53078ef7@moroto.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/netdevsim/dev.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 68e56e451b2be..c3fbdd6b68baf 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -184,13 +184,10 @@ static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
+ cookie_len = (count - 1) / 2;
+ if ((count - 1) % 2)
+ return -EINVAL;
+- buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
+- if (!buf)
+- return -ENOMEM;
+
+- ret = simple_write_to_buffer(buf, count, ppos, data, count);
+- if (ret < 0)
+- goto free_buf;
++ buf = memdup_user(data, count);
++ if (IS_ERR(buf))
++ return PTR_ERR(buf);
+
+ fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
+ GFP_KERNEL | __GFP_NOWARN);
+--
+2.39.2
+
--- /dev/null
+From 02c36801bbfb264b0b8a9241fed42c064a2dbe08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Nov 2022 09:43:09 +0000
+Subject: NTB: amd: Fix error handling in amd_ntb_pci_driver_init()
+
+From: Yuan Can <yuancan@huawei.com>
+
+[ Upstream commit 98af0a33c1101c29b3ce4f0cf4715fd927c717f9 ]
+
+A failure to create the ntb_hw_amd debugfs directory is triggered, with
+the following log:
+
+ [ 618.431232] AMD(R) PCI-E Non-Transparent Bridge Driver 1.0
+ [ 618.433284] debugfs: Directory 'ntb_hw_amd' with parent '/' already present!
+
+The reason is that amd_ntb_pci_driver_init() returns the result of
+pci_register_driver() directly without checking it. If
+pci_register_driver() fails, the function returns without destroying the
+newly created debugfs directory, so the ntb_hw_amd debugfs directory can
+never be created later.
+
+ amd_ntb_pci_driver_init()
+ debugfs_create_dir() # create debugfs directory
+ pci_register_driver()
+ driver_register()
+ bus_add_driver()
+ priv = kzalloc(...) # OOM happened
+ # return without destroy debugfs directory
+
+Fix by removing the debugfs directory when pci_register_driver() returns
+an error.
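+
+The resulting init-time pattern, as a sketch (debugfs_remove_recursive()
+ignores a NULL/ERR_PTR argument, so the cleanup is safe even if the
+directory was never created):
+
+ static int __init amd_ntb_pci_driver_init(void)
+ {
+ 	int ret;
+
+ 	if (debugfs_initialized())
+ 		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ 	ret = pci_register_driver(&amd_ntb_pci_driver);
+ 	if (ret)
+ 		debugfs_remove_recursive(debugfs_dir);	/* undo on failure */
+
+ 	return ret;
+ }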
+
+Fixes: a1b3695820aa ("NTB: Add support for AMD PCI-Express Non-Transparent Bridge")
+Signed-off-by: Yuan Can <yuancan@huawei.com>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ntb/hw/amd/ntb_hw_amd.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 04550b1f984c6..730f2103b91d1 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1338,12 +1338,17 @@ static struct pci_driver amd_ntb_pci_driver = {
+
+ static int __init amd_ntb_pci_driver_init(void)
+ {
++ int ret;
+ pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+ if (debugfs_initialized())
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+- return pci_register_driver(&amd_ntb_pci_driver);
++ ret = pci_register_driver(&amd_ntb_pci_driver);
++ if (ret)
++ debugfs_remove_recursive(debugfs_dir);
++
++ return ret;
+ }
+ module_init(amd_ntb_pci_driver_init);
+
+--
+2.39.2
+
--- /dev/null
+From 0d3e744acbf28967be63072157b302b1ea66201f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Nov 2022 09:43:01 +0000
+Subject: ntb: idt: Fix error handling in idt_pci_driver_init()
+
+From: Yuan Can <yuancan@huawei.com>
+
+[ Upstream commit c012968259b451dc4db407f2310fe131eaefd800 ]
+
+A failure to create the ntb_hw_idt debugfs directory is triggered, with
+the following log:
+
+ [ 1236.637636] IDT PCI-E Non-Transparent Bridge Driver 2.0
+ [ 1236.639292] debugfs: Directory 'ntb_hw_idt' with parent '/' already present!
+
+The reason is that idt_pci_driver_init() returns the result of
+pci_register_driver() directly without checking it. If
+pci_register_driver() fails, the function returns without destroying the
+newly created debugfs directory, so the ntb_hw_idt debugfs directory can
+never be created later.
+
+ idt_pci_driver_init()
+ debugfs_create_dir() # create debugfs directory
+ pci_register_driver()
+ driver_register()
+ bus_add_driver()
+ priv = kzalloc(...) # OOM happened
+ # return without destroy debugfs directory
+
+Fix by removing the debugfs directory when pci_register_driver() returns
+an error.
+
+Fixes: bf2a952d31d2 ("NTB: Add IDT 89HPESxNTx PCIe-switches support")
+Signed-off-by: Yuan Can <yuancan@huawei.com>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ntb/hw/idt/ntb_hw_idt.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
+index 0ed6f809ff2ee..51799fccf8404 100644
+--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
++++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
+@@ -2891,6 +2891,7 @@ static struct pci_driver idt_pci_driver = {
+
+ static int __init idt_pci_driver_init(void)
+ {
++ int ret;
+ pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+ /* Create the top DebugFS directory if the FS is initialized */
+@@ -2898,7 +2899,11 @@ static int __init idt_pci_driver_init(void)
+ dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ /* Register the NTB hardware driver to handle the PCI device */
+- return pci_register_driver(&idt_pci_driver);
++ ret = pci_register_driver(&idt_pci_driver);
++ if (ret)
++ debugfs_remove_recursive(dbgfs_topdir);
++
++ return ret;
+ }
+ module_init(idt_pci_driver_init);
+
+--
+2.39.2
+
--- /dev/null
+From 68de370acd64ac8c119d8f145d6cd8a9022d6a0e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Nov 2022 09:43:22 +0000
+Subject: ntb: intel: Fix error handling in intel_ntb_pci_driver_init()
+
+From: Yuan Can <yuancan@huawei.com>
+
+[ Upstream commit 4c3c796aca02883ad35bb117468938cc4022ca41 ]
+
+A failure to create the ntb_hw_intel debugfs directory is triggered, with
+the following log:
+
+ [ 273.112733] Intel(R) PCI-E Non-Transparent Bridge Driver 2.0
+ [ 273.115342] debugfs: Directory 'ntb_hw_intel' with parent '/' already present!
+
+The reason is that intel_ntb_pci_driver_init() returns the result of
+pci_register_driver() directly without checking it. If
+pci_register_driver() fails, the function returns without destroying the
+newly created debugfs directory, so the ntb_hw_intel debugfs directory
+can never be created later.
+
+ intel_ntb_pci_driver_init()
+ debugfs_create_dir() # create debugfs directory
+ pci_register_driver()
+ driver_register()
+ bus_add_driver()
+ priv = kzalloc(...) # OOM happened
+ # return without destroy debugfs directory
+
+Fix by removing the debugfs directory when pci_register_driver() returns
+an error.
+
+Fixes: e26a5843f7f5 ("NTB: Split ntb_hw_intel and ntb_transport drivers")
+Signed-off-by: Yuan Can <yuancan@huawei.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ntb/hw/intel/ntb_hw_gen1.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+index 84772013812bf..60a4ebc7bf35a 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+@@ -2064,12 +2064,17 @@ static struct pci_driver intel_ntb_pci_driver = {
+
+ static int __init intel_ntb_pci_driver_init(void)
+ {
++ int ret;
+ pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+ if (debugfs_initialized())
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+- return pci_register_driver(&intel_ntb_pci_driver);
++ ret = pci_register_driver(&intel_ntb_pci_driver);
++ if (ret)
++ debugfs_remove_recursive(debugfs_dir);
++
++ return ret;
+ }
+ module_init(intel_ntb_pci_driver_init);
+
+--
+2.39.2
+
--- /dev/null
+From f2bb9d596e9772896cdbc9e4638e7b76b46e6613 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Nov 2022 11:32:44 +0800
+Subject: NTB: ntb_tool: Add check for devm_kcalloc
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 2790143f09938776a3b4f69685b380bae8fd06c7 ]
+
+As devm_kcalloc() may return a NULL pointer, add a check for its return
+value, as is done for the other allocations.
+
+Fixes: 7f46c8b3a552 ("NTB: ntb_tool: Add full multi-port NTB API support")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ntb/test/ntb_tool.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
+index 5ee0afa621a95..eeeb4b1c97d2c 100644
+--- a/drivers/ntb/test/ntb_tool.c
++++ b/drivers/ntb/test/ntb_tool.c
+@@ -998,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc)
+ tc->peers[pidx].outmws =
+ devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
+ sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
++ if (tc->peers[pidx].outmws == NULL)
++ return -ENOMEM;
+
+ for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
+ tc->peers[pidx].outmws[widx].pidx = pidx;
+--
+2.39.2
+
--- /dev/null
+From 0f4e3f05e9406a68d728d0afa9cfef142bf0e90f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Nov 2022 23:19:17 +0800
+Subject: NTB: ntb_transport: fix possible memory leak while device_register()
+ fails
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 8623ccbfc55d962e19a3537652803676ad7acb90 ]
+
+If device_register() returns an error, the name allocated by
+dev_set_name() needs to be freed. As the comment of device_register()
+says, put_device() should be used to give up the reference in the error
+path. Fix this by calling put_device(); the name is then freed in
+kobject_cleanup(), and client_dev is freed in
+ntb_transport_client_release().
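+
+The documented pattern, as a minimal sketch (the containing object is
+freed by the device's release callback, here
+ntb_transport_client_release()):
+
+ 	rc = device_register(dev);
+ 	if (rc) {
+ 		/* drops the initial reference: frees the name via
+ 		 * kobject_cleanup() and the containing object via ->release()
+ 		 */
+ 		put_device(dev);
+ 		goto err;
+ 	}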
+
+Fixes: fce8a7bb5b4b ("PCI-Express Non-Transparent Bridge Support")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Jon Mason <jdmason@kudzu.us>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ntb/ntb_transport.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index a9b97ebc71ac5..2abd2235bbcab 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -410,7 +410,7 @@ int ntb_transport_register_client_dev(char *device_name)
+
+ rc = device_register(dev);
+ if (rc) {
+- kfree(client_dev);
++ put_device(dev);
+ goto err;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 2e259784714ecf8af9c0fcf82e7535b3bd0372fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jun 2023 18:08:05 +0530
+Subject: nvme: fix the NVME_ID_NS_NVM_STS_MASK definition
+
+From: Ankit Kumar <ankit.kumar@samsung.com>
+
+[ Upstream commit b938e6603660652dc3db66d3c915fbfed3bce21d ]
+
+As per the NVMe command set specification 1.0c, the storage tag size
+field is 7 bits wide.
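+
+For illustration, this is roughly how an ELBAF descriptor decodes with
+these constants (variable names are illustrative, not necessarily the
+driver's):
+
+ 	u32 elbaf = le32_to_cpu(id->elbaf[lbaf]);	/* extended LBA format descriptor */
+
+ 	sts = elbaf & NVME_ID_NS_NVM_STS_MASK;		/* storage tag size: bits 6:0 -> 0x7f */
+ 	pif = (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) &
+ 	      NVME_ID_NS_NVM_GUARD_MASK;		/* guard/PI format: bits 8:7 */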
+
+Fixes: 4020aad85c67 ("nvme: add support for enhanced metadata")
+Signed-off-by: Ankit Kumar <ankit.kumar@samsung.com>
+Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/nvme.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index d9fbc5afeaf72..e6fb36b71b59d 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -473,7 +473,7 @@ struct nvme_id_ns_nvm {
+ };
+
+ enum {
+- NVME_ID_NS_NVM_STS_MASK = 0x3f,
++ NVME_ID_NS_NVM_STS_MASK = 0x7f,
+ NVME_ID_NS_NVM_GUARD_SHIFT = 7,
+ NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+ };
+--
+2.39.2
+
--- /dev/null
+From bc9ccd7125c55d66cc6d4718a236cf7328b57acc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Jul 2023 17:26:20 +0800
+Subject: nvme-pci: fix DMA direction of unmapping integrity data
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit b8f6446b6853768cb99e7c201bddce69ca60c15e ]
+
+The DMA direction of the request (rq_dma_dir()) should be passed to
+dma_unmap_page() when unmapping the integrity data, matching the
+direction used for the mapping.
+
+Fix this DMA direction; the issue was reported from Guangwu's testing.
+
+Reported-by: Guangwu Zhang <guazhang@redhat.com>
+Fixes: 4aedb705437f ("nvme-pci: split metadata handling from nvme_map_data / nvme_unmap_data")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 145fa7ef3f740..ce2e628f94a05 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1022,7 +1022,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ dma_unmap_page(dev->dev, iod->meta_dma,
+- rq_integrity_vec(req)->bv_len, rq_data_dir(req));
++ rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ }
+
+ if (blk_rq_nr_phys_segments(req))
+--
+2.39.2
+
--- /dev/null
+From 14b7090b56e22ba04e7b11d8ff4a6fccf9ff234d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 13:59:36 +0530
+Subject: octeontx2-af: Move validation of ptp pointer before its usage
+
+From: Sai Krishna <saikrishnag@marvell.com>
+
+[ Upstream commit 7709fbd4922c197efabda03660d93e48a3e80323 ]
+
+Move the PTP pointer validation before its use to avoid a smatch
+warning. Also use kzalloc()/kfree() instead of devm_kzalloc()/devm_kfree().
+
+Fixes: 2ef4e45d99b1 ("octeontx2-af: Add PTP PPS Errata workaround on CN10K silicon")
+Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Signed-off-by: Sai Krishna <saikrishnag@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/ptp.c | 19 +++++++++----------
+ .../net/ethernet/marvell/octeontx2/af/rvu.c | 2 +-
+ 2 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+index 3411e2e47d46b..0ee420a489fc4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+@@ -208,7 +208,7 @@ struct ptp *ptp_get(void)
+ /* Check driver is bound to PTP block */
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+- else
++ else if (!IS_ERR(ptp))
+ pci_dev_get(ptp->pdev);
+
+ return ptp;
+@@ -388,11 +388,10 @@ static int ptp_extts_on(struct ptp *ptp, int on)
+ static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+- struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ int err;
+
+- ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
++ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+@@ -428,20 +427,19 @@ static int ptp_probe(struct pci_dev *pdev,
+ return 0;
+
+ error_free:
+- devm_kfree(dev, ptp);
++ kfree(ptp);
+
+ error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+- * the probe failed. In the later case we pretend that the
+- * initialization was successful and keep the error in
++ * the probe failed. In the later case we keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ if (!first_ptp_block)
+ first_ptp_block = ERR_PTR(err);
+
+- return 0;
++ return err;
+ }
+
+ static void ptp_remove(struct pci_dev *pdev)
+@@ -449,16 +447,17 @@ static void ptp_remove(struct pci_dev *pdev)
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+- if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+- hrtimer_cancel(&ptp->hrtimer);
+-
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
++ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
++ hrtimer_cancel(&ptp->hrtimer);
++
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
++ kfree(ptp);
+ }
+
+ static const struct pci_device_id ptp_id_table[] = {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 873f081c030de..733add3a9dc6b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -3244,7 +3244,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+- if (err == -EPROBE_DEFER)
++ if (err)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+--
+2.39.2
+
--- /dev/null
+From b95e3eae3ce1b093ec6e8cc6972aea5b45262891 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 09:57:05 +0530
+Subject: octeontx2-af: Promisc enable/disable through mbox
+
+From: Ratheesh Kannoth <rkannoth@marvell.com>
+
+[ Upstream commit af42088bdaf292060b8d8a00d8644ca7b2b3f2d1 ]
+
+In legacy silicon, promiscuous mode is only modified
+through CGX mbox messages. In CN10KB silicon, it is modified
+from both the CGX mbox and NIX. This breaks legacy application
+behaviour. Fix this by removing the call from NIX.
+
+Fixes: d6c9784baf59 ("octeontx2-af: Invoke exact match functions if supported")
+Signed-off-by: Ratheesh Kannoth <rkannoth@marvell.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeontx2/af/rvu_nix.c | 11 ++-------
+ .../marvell/octeontx2/af/rvu_npc_hash.c | 23 +++++++++++++++++--
+ 2 files changed, 23 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 8cb2a0181fb9b..705325431dec3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3804,21 +3804,14 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+ }
+
+ /* install/uninstall promisc entry */
+- if (promisc) {
++ if (promisc)
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+-
+- if (rvu_npc_exact_has_match_table(rvu))
+- rvu_npc_exact_promisc_enable(rvu, pcifunc);
+- } else {
++ else
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+- if (rvu_npc_exact_has_match_table(rvu))
+- rvu_npc_exact_promisc_disable(rvu, pcifunc);
+- }
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 3b48b635977f6..3b0a66c0977a7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -1168,8 +1168,10 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ {
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
++ bool promisc;
+
+ table = rvu->hw->table;
++ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+@@ -1181,13 +1183,18 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+
+ *enable_or_disable_cam = false;
+
+- /* If all rules are deleted, disable cam */
++ if (promisc)
++ goto done;
++
++ /* If all rules are deleted and not already in promisc mode;
++ * disable cam
++ */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+- /* If rule got added, enable cam */
++ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+@@ -1466,6 +1473,12 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+ *promisc = false;
+ mutex_unlock(&table->lock);
+
++ /* Enable drop rule */
++ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
++ true);
++
++ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
++ __func__, cgx_id, lmac_id);
+ return 0;
+ }
+
+@@ -1507,6 +1520,12 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+ *promisc = true;
+ mutex_unlock(&table->lock);
+
++ /* disable drop rule */
++ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
++ false);
++
++ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
++ __func__, cgx_id, lmac_id);
+ return 0;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 265189a084ea8819eaeadaa4491089327fe3ba34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 16:00:27 +0530
+Subject: octeontx2-pf: Add additional check for MCAM rules
+
+From: Suman Ghosh <sumang@marvell.com>
+
+[ Upstream commit 8278ee2a2646b9acf747317895e47a640ba933c9 ]
+
+Due to a hardware limitation, an MCAM drop rule with
+ether_type == 802.1Q and vlan_id == 0 is not supported. Hence, reject
+such rules.
+
+Fixes: dce677da57c0 ("octeontx2-pf: Add vlan-etype to ntuple filters")
+Signed-off-by: Suman Ghosh <sumang@marvell.com>
+Link: https://lore.kernel.org/r/20230710103027.2244139-1-sumang@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeontx2/nic/otx2_flows.c | 8 ++++++++
+ .../net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 15 +++++++++++++++
+ 2 files changed, 23 insertions(+)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index d0554f6d26731..934c199667b59 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -867,6 +867,14 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ return -EINVAL;
+
+ vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
++
++ /* Drop rule with vlan_etype == 802.1Q
++ * and vlan_id == 0 is not supported
++ */
++ if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
++ fsp->ring_cookie == RX_CLS_FLOW_DISC)
++ return -EINVAL;
++
+ /* Only ETH_P_8021Q and ETH_P_802AD types supported */
+ if (vlan_etype != ETH_P_8021Q &&
+ vlan_etype != ETH_P_8021AD)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index 6a01ab1a6e6f3..1aeb18a901b13 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -579,6 +579,21 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ return -EOPNOTSUPP;
+ }
+
++ if (!match.mask->vlan_id) {
++ struct flow_action_entry *act;
++ int i;
++
++ flow_action_for_each(i, act, &rule->action) {
++ if (act->id == FLOW_ACTION_DROP) {
++ netdev_err(nic->netdev,
++ "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
++ ntohs(match.key->vlan_tpid),
++ match.key->vlan_id);
++ return -EOPNOTSUPP;
++ }
++ }
++ }
++
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+--
+2.39.2
+
--- /dev/null
+From f884a075428f73c16681b023bdede17618d31877 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jun 2023 18:11:54 +0300
+Subject: platform/x86: wmi: Break possible infinite loop when parsing GUID
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 028e6e204ace1f080cfeacd72c50397eb8ae8883 ]
+
+The while-loop may break on one of two conditions: either the ID string
+is empty or the GUID matches. The second one may never be reached if the
+parsed string is not a correct GUID. In such a case the loop will never
+advance to check the next ID.
+
+Break the possible infinite loop by factoring out a
+guid_parse_and_compare() helper, which may later be moved to a generic
+header for everyone to use, preventing similar mistakes in the future.
+
+Interestingly, the bug first appeared when WMI was turned into a bus
+driver, but later, when duplicated GUIDs were checked, the while-loop was
+replaced by a for-loop, so the mistake was not repeated.
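+
+A minimal sketch of the failure mode and the fix, using the same
+guid_parse()/guid_equal() helpers the driver already relies on (trimmed
+pseudo-C of the two loop shapes, not a standalone compilable unit):
+
+    /* Broken shape: 'continue' skips the id++ advance, so one malformed
+     * GUID string leaves the loop spinning on the same table entry. */
+    while (*id->guid_string) {
+            guid_t guid;
+
+            if (guid_parse(id->guid_string, &guid))
+                    continue;       /* id is never advanced */
+            if (guid_equal(&guid, &wblock->gblock.guid))
+                    return 1;
+            id++;
+    }
+
+    /* Fixed shape: a parse failure simply counts as "no match", so the
+     * loop always reaches id++. */
+    while (*id->guid_string) {
+            if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+                    return 1;
+            id++;
+    }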
+
+Fixes: a48e23385fcf ("platform/x86: wmi: add context pointer field to struct wmi_device_id")
+Fixes: 844af950da94 ("platform/x86: wmi: Turn WMI into a bus driver")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20230621151155.78279-1-andriy.shevchenko@linux.intel.com
+Tested-by: Armin Wolf <W_Armin@gmx.de>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/wmi.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 223550a10d4dd..2fe6e147785e4 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -135,6 +135,16 @@ static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
+ return AE_NOT_FOUND;
+ }
+
++static bool guid_parse_and_compare(const char *string, const guid_t *guid)
++{
++ guid_t guid_input;
++
++ if (guid_parse(string, &guid_input))
++ return false;
++
++ return guid_equal(&guid_input, guid);
++}
++
+ static const void *find_guid_context(struct wmi_block *wblock,
+ struct wmi_driver *wdriver)
+ {
+@@ -145,11 +155,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
+ return NULL;
+
+ while (*id->guid_string) {
+- guid_t guid_input;
+-
+- if (guid_parse(id->guid_string, &guid_input))
+- continue;
+- if (guid_equal(&wblock->gblock.guid, &guid_input))
++ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ return id->context;
+ id++;
+ }
+@@ -833,11 +839,7 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ return 0;
+
+ while (*id->guid_string) {
+- guid_t driver_guid;
+-
+- if (WARN_ON(guid_parse(id->guid_string, &driver_guid)))
+- continue;
+- if (guid_equal(&driver_guid, &wblock->gblock.guid))
++ if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ return 1;
+
+ id++;
+--
+2.39.2
+
--- /dev/null
+From 7b6ccc56fa28b86be4856b34b301dc1413f262ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 09:41:31 +0200
+Subject: riscv, bpf: Fix inconsistent JIT image generation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Björn Töpel <bjorn@rivosinc.com>
+
+[ Upstream commit c56fb2aab23505bb7160d06097c8de100b82b851 ]
+
+In order to generate the prologue and epilogue, the BPF JIT needs to
+know which registers are clobbered. Therefore, during the pre-final
+passes, the prologue is generated after the body of the program
+(body-prologue-epilogue order). Then, in the final pass, a proper
+prologue-body-epilogue JITted image is generated.
+
+This scheme has worked most of the time. However, for some large
+programs with many jumps, e.g. the test_kmod.sh BPF selftest with
+hardening enabled (blinding constants), this has proven to be
+incorrect. In the final pass, when the proper prologue-body-epilogue
+is generated, the image has not yet converged, which leads to incorrect
+jump offsets in the final image. The following is an
+excerpt from an incorrect image:
+
+ | ...
+ | 3b8: 00c50663 beq a0,a2,3c4 <.text+0x3c4>
+ | 3bc: 0020e317 auipc t1,0x20e
+ | 3c0: 49630067 jalr zero,1174(t1) # 20e852 <.text+0x20e852>
+ | ...
+ | 20e84c: 8796 c.mv a5,t0
+ | 20e84e: 6422 c.ldsp s0,8(sp) # Epilogue start
+ | 20e850: 6141 c.addi16sp sp,16
+ | 20e852: 853e c.mv a0,a5 # Incorrect jump target
+ | 20e854: 8082 c.jr ra
+
+The image has shrunk, and the epilogue offset is incorrect in the
+final pass.
+
+Correct the problem by always generating proper prologue-body-epilogue
+output, which means that the first pass will only generate the body in
+order to track which registers are touched.
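+
+A simplified outline of the per-pass ordering after this change,
+condensed from bpf_int_jit_compile() in the hunk below (image
+allocation, extable sizing and error unwinding are elided):
+
+    for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+            pass++;
+            ctx->ninsns = 0;
+
+            bpf_jit_build_prologue(ctx);        /* emitted first in every pass */
+            ctx->prologue_len = ctx->ninsns;    /* body offsets start after it */
+
+            if (build_body(ctx, extra_pass, ctx->offset))
+                    break;                      /* bail out on error */
+
+            ctx->epilogue_offset = ctx->ninsns;
+            bpf_jit_build_epilogue(ctx);
+
+            if (ctx->ninsns == prev_ninsns)     /* image has converged */
+                    break;
+            prev_ninsns = ctx->ninsns;
+    }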
+
+Fixes: 2353ecc6f91f ("bpf, riscv: add BPF JIT for RV64G")
+Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20230710074131.19596-1-bjorn@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/net/bpf_jit.h | 6 +++---
+ arch/riscv/net/bpf_jit_core.c | 19 +++++++++++++------
+ 2 files changed, 16 insertions(+), 9 deletions(-)
+
+diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
+index d926e0f7ef571..5ee21a19969c9 100644
+--- a/arch/riscv/net/bpf_jit.h
++++ b/arch/riscv/net/bpf_jit.h
+@@ -69,7 +69,7 @@ struct rv_jit_context {
+ struct bpf_prog *prog;
+ u16 *insns; /* RV insns */
+ int ninsns;
+- int body_len;
++ int prologue_len;
+ int epilogue_offset;
+ int *offset; /* BPF to RV */
+ int nexentries;
+@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
+ int from, to;
+
+ off++; /* BPF branch is from PC+1, RV is from PC */
+- from = (insn > 0) ? ctx->offset[insn - 1] : 0;
+- to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
++ from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
++ to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
+ return ninsns_rvoff(to - from);
+ }
+
+diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
+index 737baf8715da7..7a26a3e1c73cf 100644
+--- a/arch/riscv/net/bpf_jit_core.c
++++ b/arch/riscv/net/bpf_jit_core.c
+@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ unsigned int prog_size = 0, extable_size = 0;
+ bool tmp_blinded = false, extra_pass = false;
+ struct bpf_prog *tmp, *orig_prog = prog;
+- int pass = 0, prev_ninsns = 0, prologue_len, i;
++ int pass = 0, prev_ninsns = 0, i;
+ struct rv_jit_data *jit_data;
+ struct rv_jit_context *ctx;
+
+@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ prog = orig_prog;
+ goto out_offset;
+ }
++
++ if (build_body(ctx, extra_pass, NULL)) {
++ prog = orig_prog;
++ goto out_offset;
++ }
++
+ for (i = 0; i < prog->len; i++) {
+ prev_ninsns += 32;
+ ctx->offset[i] = prev_ninsns;
+@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+ pass++;
+ ctx->ninsns = 0;
++
++ bpf_jit_build_prologue(ctx);
++ ctx->prologue_len = ctx->ninsns;
++
+ if (build_body(ctx, extra_pass, ctx->offset)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+- ctx->body_len = ctx->ninsns;
+- bpf_jit_build_prologue(ctx);
++
+ ctx->epilogue_offset = ctx->ninsns;
+ bpf_jit_build_epilogue(ctx);
+
+@@ -162,10 +171,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+
+ if (!prog->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(jit_data->header);
+- prologue_len = ctx->epilogue_offset - ctx->body_len;
+ for (i = 0; i < prog->len; i++)
+- ctx->offset[i] = ninsns_rvoff(prologue_len +
+- ctx->offset[i]);
++ ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
+ bpf_prog_fill_jited_linfo(prog, ctx->offset);
+ out_offset:
+ kfree(ctx->offset);
+--
+2.39.2
+
--- /dev/null
+From dee1a9728116f3c2b6626672c0d868bde92c42f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 01:10:36 +0800
+Subject: riscv: mm: fix truncation warning on RV32
+
+From: Jisheng Zhang <jszhang@kernel.org>
+
+[ Upstream commit b690e266dae2f85f4dfea21fa6a05e3500a51054 ]
+
+lkp reports below sparse warning when building for RV32:
+arch/riscv/mm/init.c:1204:48: sparse: warning: cast truncates bits from
+constant value (100000000 becomes 0)
+
+IMO, the reason we didn't see this truncation bug in the real world is
+that "0" means MEMBLOCK_ALLOC_ACCESSIBLE in memblock and there's no RV32
+HW with more than 4GB of memory.
+
+Fix it anyway to make sparse happy.
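+
+A self-contained userspace illustration of the truncation (a hypothetical
+demo using the same constant value that <linux/sizes.h> assigns to SZ_4G;
+on an ILP32 target such as RV32, unsigned long is 32 bits wide):
+
+    #include <stdio.h>
+
+    #define SZ_4G 0x100000000ULL
+
+    int main(void)
+    {
+            /* (unsigned long)SZ_4G truncates to 0 on a 32-bit build --
+             * exactly what sparse warns about; SZ_4G - 1 still fits. */
+            unsigned long bad  = (unsigned long)SZ_4G;
+            unsigned long good = (unsigned long)(SZ_4G - 1);
+
+            printf("bad=%#lx good=%#lx\n", bad, good);
+            return 0;
+    }
+
+On a 32-bit build this prints "bad=0 good=0xffffffff", matching the
+"100000000 becomes 0" part of the sparse report.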
+
+Fixes: decf89f86ecd ("riscv: try to allocate crashkern region from 32bit addressible memory")
+Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202306080034.SLiCiOMn-lkp@intel.com/
+Link: https://lore.kernel.org/r/20230709171036.1906-1-jszhang@kernel.org
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 9390cdff39ffc..7c4852af9e3f1 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -1187,7 +1187,7 @@ static void __init reserve_crashkernel(void)
+ */
+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+ search_start,
+- min(search_end, (unsigned long) SZ_4G));
++ min(search_end, (unsigned long)(SZ_4G - 1)));
+ if (crash_base == 0) {
+ /* Try again without restricting region to 32bit addressible memory */
+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+--
+2.39.2
+
--- /dev/null
+From 3cfdca1f3557b9f649763938dc39d44eb66edee0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 13:58:47 +0300
+Subject: scsi: qla2xxx: Fix error code in qla2x00_start_sp()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit e579b007eff3ff8d29d59d16214cd85fb9e573f7 ]
+
+This should be the negative -EAGAIN instead of the positive EAGAIN. The
+callers treat non-zero error codes the same, so it doesn't really impact
+runtime beyond some trivial differences in debug output.
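+
+A tiny userspace illustration of why only diagnostics notice the sign (a
+hypothetical demo, not driver code): both values are non-zero, so an
+"if (rval)" failure check behaves the same, but anything that logs the
+value or compares it against -EAGAIN sees the difference:
+
+    #include <errno.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            int before = EAGAIN;    /* what was returned before this fix */
+            int after  = -EAGAIN;   /* the usual in-kernel error convention */
+
+            printf("before=%d after=%d failure-check=%d/%d\n",
+                   before, after, !!before, !!after);
+            return 0;
+    }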
+
+Fixes: 80676d054e5a ("scsi: qla2xxx: Fix session cleanup hang")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/49866d28-4cfe-47b0-842b-78f110e61aab@moroto.mountain
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla2xxx/qla_iocb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 4f48f098ea5a6..605e94f973189 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -3898,7 +3898,7 @@ qla2x00_start_sp(srb_t *sp)
+
+ pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ if (!pkt) {
+- rval = EAGAIN;
++ rval = -EAGAIN;
+ ql_log(ql_log_warn, vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
+ goto done;
+--
+2.39.2
+
--- /dev/null
+From 050be3b329aef1686f3b8443c5b98b80b3278f24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jun 2023 22:23:48 -0700
+Subject: scsi: ufs: ufs-mediatek: Add dependency for RESET_CONTROLLER
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 89f7ef7f2b23b2a7b8ce346c23161916eae5b15c ]
+
+When RESET_CONTROLLER is not set, kconfig complains about missing
+dependencies for RESET_TI_SYSCON, so add the missing dependency just as is
+done above for SCSI_UFS_QCOM.
+
+Silences this kconfig warning:
+
+WARNING: unmet direct dependencies detected for RESET_TI_SYSCON
+ Depends on [n]: RESET_CONTROLLER [=n] && HAS_IOMEM [=y]
+ Selected by [m]:
+ - SCSI_UFS_MEDIATEK [=m] && SCSI_UFSHCD [=y] && SCSI_UFSHCD_PLATFORM [=y] && ARCH_MEDIATEK [=y]
+
+Fixes: de48898d0cb6 ("scsi: ufs-mediatek: Create reset control device_link")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Link: lore.kernel.org/r/202306020859.1wHg9AaT-lkp@intel.com
+Link: https://lore.kernel.org/r/20230701052348.28046-1-rdunlap@infradead.org
+Cc: Stanley Chu <stanley.chu@mediatek.com>
+Cc: Peter Wang <peter.wang@mediatek.com>
+Cc: Paul Gazzillo <paul@pgazz.com>
+Cc: Necip Fazil Yildiran <fazilyildiran@gmail.com>
+Cc: linux-scsi@vger.kernel.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-mediatek@lists.infradead.org
+Cc: "James E.J. Bottomley" <jejb@linux.ibm.com>
+Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/host/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
+index 4cc2dbd79ed0e..9b39fd76031be 100644
+--- a/drivers/ufs/host/Kconfig
++++ b/drivers/ufs/host/Kconfig
+@@ -71,6 +71,7 @@ config SCSI_UFS_QCOM
+ config SCSI_UFS_MEDIATEK
+ tristate "Mediatek specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
++ depends on RESET_CONTROLLER
+ select PHY_MTK_UFS
+ select RESET_TI_SYSCON
+ help
+--
+2.39.2
+
wireguard-queueing-use-saner-cpu-selection-wrapping.patch
wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch
tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch
+drm-panel-simple-add-connector_type-for-innolux_at04.patch
+drm-bridge-ti-sn65dsi86-fix-auxiliary-bus-lifetime.patch
+swiotlb-always-set-the-number-of-areas-before-alloca.patch
+swiotlb-reduce-the-swiotlb-buffer-size-on-allocation.patch
+swiotlb-reduce-the-number-of-areas-to-match-actual-m.patch
+drm-panel-simple-add-powertip-ph800480t013-drm_displ.patch
+ice-fix-max_rate-check-while-configuring-tx-rate-lim.patch
+igc-remove-delay-during-tx-ring-configuration.patch
+net-mlx5e-fix-double-free-in-mlx5e_destroy_flow_tabl.patch
+net-mlx5e-fix-memory-leak-in-mlx5e_fs_tt_redirect_an.patch
+net-mlx5e-fix-memory-leak-in-mlx5e_ptp_open.patch
+net-mlx5e-check-for-not_ready-flag-state-after-locki.patch
+igc-set-tp-bit-in-supported-and-advertising-fields-o.patch
+igc-handle-pps-start-time-programming-for-past-time-.patch
+blk-crypto-use-dynamic-lock-class-for-blk_crypto_pro.patch
+scsi-qla2xxx-fix-error-code-in-qla2x00_start_sp.patch
+scsi-ufs-ufs-mediatek-add-dependency-for-reset_contr.patch
+bpf-fix-max-stack-depth-check-for-async-callbacks.patch
+net-mvneta-fix-txq_map-in-case-of-txq_number-1.patch
+net-sched-cls_fw-fix-improper-refcount-update-leads-.patch
+gve-set-default-duplex-configuration-to-full.patch
+octeontx2-af-promisc-enable-disable-through-mbox.patch
+octeontx2-af-move-validation-of-ptp-pointer-before-i.patch
+ionic-remove-warn_on-to-prevent-panic_on_warn.patch
+net-bgmac-postpone-turning-irqs-off-to-avoid-soc-han.patch
+net-prevent-skb-corruption-on-frag-list-segmentation.patch
+icmp6-fix-null-ptr-deref-of-ip6_null_entry-rt6i_idev.patch
+udp6-fix-udp6_ehashfn-typo.patch
+ntb-idt-fix-error-handling-in-idt_pci_driver_init.patch
+ntb-amd-fix-error-handling-in-amd_ntb_pci_driver_ini.patch
+ntb-intel-fix-error-handling-in-intel_ntb_pci_driver.patch
+ntb-ntb_transport-fix-possible-memory-leak-while-dev.patch
+ntb-ntb_tool-add-check-for-devm_kcalloc.patch
+ipv6-addrconf-fix-a-potential-refcount-underflow-for.patch
+net-dsa-qca8k-add-check-for-skb_copy.patch
+platform-x86-wmi-break-possible-infinite-loop-when-p.patch
+kernel-trace-fix-cleanup-logic-of-enable_trace_eprob.patch
+igc-fix-launchtime-before-start-of-cycle.patch
+igc-fix-inserting-of-empty-frame-for-launchtime.patch
+nvme-fix-the-nvme_id_ns_nvm_sts_mask-definition.patch
+riscv-bpf-fix-inconsistent-jit-image-generation.patch
+drm-i915-don-t-preserve-dpll_hw_state-for-slave-crtc.patch
+drm-i915-fix-one-wrong-caching-mode-enum-usage.patch
+octeontx2-pf-add-additional-check-for-mcam-rules.patch
+erofs-avoid-useless-loops-in-z_erofs_pcluster_readmo.patch
+erofs-avoid-infinite-loop-in-z_erofs_do_read_page-wh.patch
+erofs-fix-fsdax-unavailability-for-chunk-based-regul.patch
+wifi-airo-avoid-uninitialized-warning-in-airo_get_ra.patch
+bpf-cpumap-fix-memory-leak-in-cpu_map_update_elem.patch
+net-sched-flower-ensure-both-minimum-and-maximum-por.patch
+riscv-mm-fix-truncation-warning-on-rv32.patch
+netdevsim-fix-uninitialized-data-in-nsim_dev_trap_fa.patch
+net-sched-make-psched_mtu-rtnl-less-safe.patch
+wifi-rtw89-debug-fix-error-code-in-rtw89_debug_priv_.patch
+net-sched-sch_qfq-refactor-parsing-of-netlink-parame.patch
+net-sched-sch_qfq-account-for-stab-overhead-in-qfq_e.patch
+nvme-pci-fix-dma-direction-of-unmapping-integrity-da.patch
--- /dev/null
+From 8883ecd2d1af7f85f13d7fd381dc3f1e8d7d5f87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 15:01:03 +0200
+Subject: swiotlb: always set the number of areas before allocating the pool
+
+From: Petr Tesarik <petr.tesarik.ext@huawei.com>
+
+[ Upstream commit aabd12609f91155f26584508b01f548215cc3c0c ]
+
+The number of areas defaults to the number of possible CPUs. However, the
+total number of slots may have to be increased after adjusting the number
+of areas. Consequently, the number of areas must be determined before
+allocating the memory pool. This is even explained with a comment in
+swiotlb_init_remap(), but swiotlb_init_late() adjusts the number of areas
+after slots are already allocated. The areas may end up being smaller than
+IO_TLB_SEGSIZE, which breaks per-area locking.
+
+While fixing swiotlb_init_late(), move all relevant comments before the
+definition of swiotlb_adjust_nareas() and convert them to kernel-doc.
+
+Fixes: 20347fca71a3 ("swiotlb: split up the global swiotlb lock")
+Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
+Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 7f4ad5e70b40c..3961065412542 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -117,9 +117,16 @@ static bool round_up_default_nslabs(void)
+ return true;
+ }
+
++/**
++ * swiotlb_adjust_nareas() - adjust the number of areas and slots
++ * @nareas: Desired number of areas. Zero is treated as 1.
++ *
++ * Adjust the default number of areas in a memory pool.
++ * The default size of the memory pool may also change to meet minimum area
++ * size requirements.
++ */
+ static void swiotlb_adjust_nareas(unsigned int nareas)
+ {
+- /* use a single area when non is specified */
+ if (!nareas)
+ nareas = 1;
+ else if (!is_power_of_2(nareas))
+@@ -318,10 +325,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ if (swiotlb_force_disable)
+ return;
+
+- /*
+- * default_nslabs maybe changed when adjust area number.
+- * So allocate bounce buffer after adjusting area number.
+- */
+ if (!default_nareas)
+ swiotlb_adjust_nareas(num_possible_cpus());
+
+@@ -398,6 +401,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ if (swiotlb_force_disable)
+ return 0;
+
++ if (!default_nareas)
++ swiotlb_adjust_nareas(num_possible_cpus());
++
+ retry:
+ order = get_order(nslabs << IO_TLB_SHIFT);
+ nslabs = SLABS_PER_PAGE << order;
+@@ -432,9 +438,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ (PAGE_SIZE << order) >> 20);
+ }
+
+- if (!default_nareas)
+- swiotlb_adjust_nareas(num_possible_cpus());
+-
+ area_order = get_order(array_size(sizeof(*mem->areas),
+ default_nareas));
+ mem->areas = (struct io_tlb_area *)
+--
+2.39.2
+
--- /dev/null
+From b5a318236c968f18cbf207f75a11fd89ed54b05f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 15:01:04 +0200
+Subject: swiotlb: reduce the number of areas to match actual memory pool size
+
+From: Petr Tesarik <petr.tesarik.ext@huawei.com>
+
+[ Upstream commit 8ac04063354a01a484d2e55d20ed1958aa0d3392 ]
+
+Although the desired size of the SWIOTLB memory pool is increased in
+swiotlb_adjust_nareas() to match the number of areas, the actual allocation
+may be smaller, which may require reducing the number of areas.
+
+For example, Xen uses swiotlb_init_late(), which in turn uses the page
+allocator. On x86, page size is 4 KiB and MAX_ORDER is 10 (1024 pages),
+resulting in a maximum memory pool size of 4 MiB. This corresponds to 2048
+slots of 2 KiB each. The minimum area size is 128 (IO_TLB_SEGSIZE),
+allowing at most 2048 / 128 = 16 areas.
+
+If num_possible_cpus() is greater than the maximum number of areas, areas
+are smaller than IO_TLB_SEGSIZE and contiguous groups of free slots will
+span multiple areas. When allocating and freeing slots, only one area will
+be properly locked, causing race conditions on the unlocked slots and
+ultimately data corruption, kernel hangs and crashes.
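+
+A self-contained sketch of the arithmetic above, reusing the logic of the
+limit_nareas() helper added by this patch (IO_TLB_SHIFT/IO_TLB_SEGSIZE
+values copied from the kernel's swiotlb definitions; the 64-CPU count is
+only an example):
+
+    #include <stdio.h>
+
+    #define IO_TLB_SHIFT   11      /* 2 KiB slots */
+    #define IO_TLB_SEGSIZE 128     /* minimum slots per area */
+
+    /* Same check as the new limit_nareas() in kernel/dma/swiotlb.c. */
+    static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
+    {
+            if (nslots < nareas * IO_TLB_SEGSIZE)
+                    return nslots / IO_TLB_SEGSIZE;
+            return nareas;
+    }
+
+    int main(void)
+    {
+            unsigned long nslots = (4UL << 20) >> IO_TLB_SHIFT; /* 4 MiB -> 2048 slots */
+
+            /* With e.g. 64 possible CPUs the area count must drop to 16. */
+            printf("nslots=%lu nareas=%u\n", nslots, limit_nareas(64, nslots));
+            return 0;
+    }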
+
+Fixes: 20347fca71a3 ("swiotlb: split up the global swiotlb lock")
+Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
+Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 27 ++++++++++++++++++++++++---
+ 1 file changed, 24 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index cc0c55ed20429..491d3c86c2280 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -140,6 +140,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
+ (default_nslabs << IO_TLB_SHIFT) >> 20);
+ }
+
++/**
++ * limit_nareas() - get the maximum number of areas for a given memory pool size
++ * @nareas: Desired number of areas.
++ * @nslots: Total number of slots in the memory pool.
++ *
++ * Limit the number of areas to the maximum possible number of areas in
++ * a memory pool of the given size.
++ *
++ * Return: Maximum possible number of areas.
++ */
++static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
++{
++ if (nslots < nareas * IO_TLB_SEGSIZE)
++ return nslots / IO_TLB_SEGSIZE;
++ return nareas;
++}
++
+ static int __init
+ setup_io_tlb_npages(char *str)
+ {
+@@ -347,6 +364,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ {
+ struct io_tlb_mem *mem = &io_tlb_default_mem;
+ unsigned long nslabs;
++ unsigned int nareas;
+ size_t alloc_size;
+ void *tlb;
+
+@@ -359,10 +377,12 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ swiotlb_adjust_nareas(num_possible_cpus());
+
+ nslabs = default_nslabs;
++ nareas = limit_nareas(default_nareas, nslabs);
+ while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
+ if (nslabs <= IO_TLB_MIN_SLABS)
+ return;
+ nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
++ nareas = limit_nareas(nareas, nslabs);
+ }
+
+ if (default_nslabs != nslabs) {
+@@ -408,6 +428,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ {
+ struct io_tlb_mem *mem = &io_tlb_default_mem;
+ unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
++ unsigned int nareas;
+ unsigned char *vstart = NULL;
+ unsigned int order, area_order;
+ bool retried = false;
+@@ -453,8 +474,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ (PAGE_SIZE << order) >> 20);
+ }
+
+- area_order = get_order(array_size(sizeof(*mem->areas),
+- default_nareas));
++ nareas = limit_nareas(default_nareas, nslabs);
++ area_order = get_order(array_size(sizeof(*mem->areas), nareas));
+ mem->areas = (struct io_tlb_area *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
+ if (!mem->areas)
+@@ -468,7 +489,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ set_memory_decrypted((unsigned long)vstart,
+ (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
+ swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
+- default_nareas);
++ nareas);
+
+ swiotlb_print_info();
+ return 0;
+--
+2.39.2
+
--- /dev/null
+From fa5816c6a9d111dad466bebe4c8d8f115019c375 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Oct 2022 19:13:27 +1100
+Subject: swiotlb: reduce the swiotlb buffer size on allocation failure
+
+From: Alexey Kardashevskiy <aik@amd.com>
+
+[ Upstream commit 8d58aa484920c4f9be4834a7aeb446cdced21a37 ]
+
+At the moment the AMD encrypted platform reserves 6% of RAM for SWIOTLB
+or 1GB, whichever is less. However, it is possible that there is no block
+big enough in low memory, which makes the SWIOTLB allocation fail, and
+the kernel then continues without DMA. In such a case a VM hangs on DMA.
+
+This moves alloc+remap to a helper and calls it from a loop where
+the size is halved on each iteration.
+
+This updates default_nslabs on successful allocation which looks like
+an oversight as not doing so should have broken callers of
+swiotlb_size_or_default().
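+
+A small standalone sketch of the retry policy this introduces (the ALIGN()
+macro and the 64 MiB starting size are illustrative; the real loop lives in
+swiotlb_init_remap() and stops once nslabs reaches IO_TLB_MIN_SLABS):
+
+    #include <stdio.h>
+
+    #define IO_TLB_SHIFT   11      /* 2 KiB slots */
+    #define IO_TLB_SEGSIZE 128
+    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
+
+    int main(void)
+    {
+            unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;
+
+            /* Each failed allocation attempt halves the requested size. */
+            for (int attempt = 0; attempt < 5; attempt++) {
+                    printf("attempt %d: %lu slabs (%lu MiB)\n", attempt,
+                           nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
+                    nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+            }
+            return 0;
+    }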
+
+Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
+Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Stable-dep-of: 8ac04063354a ("swiotlb: reduce the number of areas to match actual memory pool size")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/swiotlb.c | 63 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 39 insertions(+), 24 deletions(-)
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 3961065412542..cc0c55ed20429 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -307,6 +307,37 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+ return;
+ }
+
++static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
++ int (*remap)(void *tlb, unsigned long nslabs))
++{
++ size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
++ void *tlb;
++
++ /*
++ * By default allocate the bounce buffer memory from low memory, but
++ * allow to pick a location everywhere for hypervisors with guest
++ * memory encryption.
++ */
++ if (flags & SWIOTLB_ANY)
++ tlb = memblock_alloc(bytes, PAGE_SIZE);
++ else
++ tlb = memblock_alloc_low(bytes, PAGE_SIZE);
++
++ if (!tlb) {
++ pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
++ __func__, bytes);
++ return NULL;
++ }
++
++ if (remap && remap(tlb, nslabs) < 0) {
++ memblock_free(tlb, PAGE_ALIGN(bytes));
++ pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
++ return NULL;
++ }
++
++ return tlb;
++}
++
+ /*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+@@ -317,7 +348,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ struct io_tlb_mem *mem = &io_tlb_default_mem;
+ unsigned long nslabs;
+ size_t alloc_size;
+- size_t bytes;
+ void *tlb;
+
+ if (!addressing_limit && !swiotlb_force_bounce)
+@@ -329,31 +359,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ swiotlb_adjust_nareas(num_possible_cpus());
+
+ nslabs = default_nslabs;
+- /*
+- * By default allocate the bounce buffer memory from low memory, but
+- * allow to pick a location everywhere for hypervisors with guest
+- * memory encryption.
+- */
+-retry:
+- bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+- if (flags & SWIOTLB_ANY)
+- tlb = memblock_alloc(bytes, PAGE_SIZE);
+- else
+- tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+- if (!tlb) {
+- pr_warn("%s: failed to allocate tlb structure\n", __func__);
+- return;
+- }
+-
+- if (remap && remap(tlb, nslabs) < 0) {
+- memblock_free(tlb, PAGE_ALIGN(bytes));
+-
++ while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
++ if (nslabs <= IO_TLB_MIN_SLABS)
++ return;
+ nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+- if (nslabs >= IO_TLB_MIN_SLABS)
+- goto retry;
++ }
+
+- pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+- return;
++ if (default_nslabs != nslabs) {
++ pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
++ default_nslabs, nslabs);
++ default_nslabs = nslabs;
+ }
+
+ alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
+--
+2.39.2
+
--- /dev/null
+From 67060c56abaab910c6cd54ae78876ace1a93512c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Jul 2023 08:29:58 +0000
+Subject: udp6: fix udp6_ehashfn() typo
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 51d03e2f2203e76ed02d33fb5ffbb5fc85ffaf54 ]
+
+Amit Klein reported that udp6_ehash_secret was initialized but never used.
+
+Fixes: 1bbdceef1e53 ("inet: convert inet_ehash_secret and ipv6_hash_secret to net_get_random_once")
+Reported-by: Amit Klein <aksecurity@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willy Tarreau <w@1wt.eu>
+Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Cc: David Ahern <dsahern@kernel.org>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/udp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index c029222ce46b0..04f1d696503cd 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -90,7 +90,7 @@ static u32 udp6_ehashfn(const struct net *net,
+ fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+
+ return __inet6_ehashfn(lhash, lport, fhash, fport,
+- udp_ipv6_hash_secret + net_hash_mix(net));
++ udp6_ehash_secret + net_hash_mix(net));
+ }
+
+ int udp_v6_get_port(struct sock *sk, unsigned short snum)
+--
+2.39.2
+
--- /dev/null
+From fa647c0fe8a5cd52742db66ea9931a01ca7ca69b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Jul 2023 06:31:54 -0700
+Subject: wifi: airo: avoid uninitialized warning in airo_get_rate()
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit 9373771aaed17f5c2c38485f785568abe3a9f8c1 ]
+
+Quieten a gcc (11.3.0) build error or warning by checking the function
+call status and returning -EBUSY if the function call failed.
+This is similar to what several other wireless drivers do for the
+SIOCGIWRATE ioctl call when there is a locking problem.
+
+drivers/net/wireless/cisco/airo.c: error: 'status_rid.currentXmitRate' is used uninitialized [-Werror=uninitialized]
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Link: https://lore.kernel.org/r/39abf2c7-24a-f167-91da-ed4c5435d1c4@linux-m68k.org
+Link: https://lore.kernel.org/r/20230709133154.26206-1-rdunlap@infradead.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/cisco/airo.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
+index fb2c35bd73bb1..f6362429b735b 100644
+--- a/drivers/net/wireless/cisco/airo.c
++++ b/drivers/net/wireless/cisco/airo.c
+@@ -6146,8 +6146,11 @@ static int airo_get_rate(struct net_device *dev,
+ {
+ struct airo_info *local = dev->ml_priv;
+ StatusRid status_rid; /* Card status info */
++ int ret;
+
+- readStatusRid(local, &status_rid, 1);
++ ret = readStatusRid(local, &status_rid, 1);
++ if (ret)
++ return -EBUSY;
+
+ vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
+ /* If more than one rate, set auto */
+--
+2.39.2
+
--- /dev/null
+From 0d35501866349383cc507854289b2e91736728da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 10:45:00 +0800
+Subject: wifi: rtw89: debug: fix error code in rtw89_debug_priv_send_h2c_set()
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit 4f4626cd049576af1276c7568d5b44eb3f7bb1b1 ]
+
+If there is a failure during rtw89_fw_h2c_raw(), rtw89_debug_priv_send_h2c
+should return a negative error code instead of the positive count value.
+Fix this bug by returning the correct error code.
+
+Fixes: e3ec7017f6a2 ("rtw89: add Realtek 802.11ax driver")
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Acked-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://lore.kernel.org/r/tencent_AD09A61BC4DA92AD1EB0790F5C850E544D07@qq.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/realtek/rtw89/debug.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 50701c55ed602..ec0af903961f0 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -2130,17 +2130,18 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ u8 *h2c;
++ int ret;
+ u16 h2c_len = count / 2;
+
+ h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ if (IS_ERR(h2c))
+ return -EFAULT;
+
+- rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
++ ret = rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
+
+ kfree(h2c);
+
+- return count;
++ return ret ? ret : count;
+ }
+
+ static int
+--
+2.39.2
+