Fixes for 5.10
author    Sasha Levin <sashal@kernel.org>  Fri, 27 Aug 2021 17:35:49 +0000 (13:35 -0400)
committer Sasha Levin <sashal@kernel.org>  Fri, 27 Aug 2021 17:35:49 +0000 (13:35 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/arc-fix-config_stackdepot.patch [new file with mode: 0644]
queue-5.10/asoc-component-remove-misplaced-prefix-handling-in-p.patch [new file with mode: 0644]
queue-5.10/asoc-rt5682-adjust-headset-volume-button-threshold.patch [new file with mode: 0644]
queue-5.10/blk-iocost-fix-lockdep-warning-on-blkcg-lock.patch [new file with mode: 0644]
queue-5.10/bpf-fix-null-pointer-dereference-in-bpf_get_local_st.patch [new file with mode: 0644]
queue-5.10/net-mscc-fix-non-gpl-export-of-regmap-apis.patch [new file with mode: 0644]
queue-5.10/netfilter-conntrack-collect-all-entries-in-one-cycle.patch [new file with mode: 0644]
queue-5.10/once-fix-panic-when-module-unload.patch [new file with mode: 0644]
queue-5.10/ovl-fix-uninitialized-pointer-read-in-ovl_lookup_rea.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arc-fix-config_stackdepot.patch b/queue-5.10/arc-fix-config_stackdepot.patch
new file mode 100644
index 0000000..9d6c616
--- /dev/null
@@ -0,0 +1,47 @@
+From 529a0dbeed61d59c2661a9346a47625df7723c15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Jul 2021 07:50:33 -0700
+Subject: ARC: Fix CONFIG_STACKDEPOT
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit bf79167fd86f3b97390fe2e70231d383526bd9cc ]
+
+Enabling CONFIG_STACKDEPOT results in the following build error.
+
+arc-elf-ld: lib/stackdepot.o: in function `filter_irq_stacks':
+stackdepot.c:(.text+0x456): undefined reference to `__irqentry_text_start'
+arc-elf-ld: stackdepot.c:(.text+0x456): undefined reference to `__irqentry_text_start'
+arc-elf-ld: stackdepot.c:(.text+0x476): undefined reference to `__irqentry_text_end'
+arc-elf-ld: stackdepot.c:(.text+0x476): undefined reference to `__irqentry_text_end'
+arc-elf-ld: stackdepot.c:(.text+0x484): undefined reference to `__softirqentry_text_start'
+arc-elf-ld: stackdepot.c:(.text+0x484): undefined reference to `__softirqentry_text_start'
+arc-elf-ld: stackdepot.c:(.text+0x48c): undefined reference to `__softirqentry_text_end'
+arc-elf-ld: stackdepot.c:(.text+0x48c): undefined reference to `__softirqentry_text_end'
+
+Other architectures address this problem by adding IRQENTRY_TEXT and
+SOFTIRQENTRY_TEXT to the text segment, so do the same here.
+
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arc/kernel/vmlinux.lds.S | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
+index 33ce59d91461..f67e4ad7b3ce 100644
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -88,6 +88,8 @@ SECTIONS
+               CPUIDLE_TEXT
+               LOCK_TEXT
+               KPROBES_TEXT
++              IRQENTRY_TEXT
++              SOFTIRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+       }
+-- 
+2.30.2
+
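For context on the link failure: filter_irq_stacks() in lib/stackdepot.c trims saved stack traces at the first IRQ-entry frame by comparing each address against linker-provided section bounds. A simplified paraphrase of that consumer (details vary by kernel version):

    /* Bounds emitted by the linker only when the architecture's linker
     * script includes IRQENTRY_TEXT / SOFTIRQENTRY_TEXT. */
    extern char __irqentry_text_start[], __irqentry_text_end[];
    extern char __softirqentry_text_start[], __softirqentry_text_end[];

    static bool in_irqentry_text(unsigned long ptr)
    {
            return (ptr >= (unsigned long)&__irqentry_text_start &&
                    ptr < (unsigned long)&__irqentry_text_end) ||
                   (ptr >= (unsigned long)&__softirqentry_text_start &&
                    ptr < (unsigned long)&__softirqentry_text_end);
    }

Since ARC's vmlinux.lds.S emitted neither section, all four symbols were undefined and the link failed exactly as in the log quoted above; the two-line hunk in the patch supplies them.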
diff --git a/queue-5.10/asoc-component-remove-misplaced-prefix-handling-in-p.patch b/queue-5.10/asoc-component-remove-misplaced-prefix-handling-in-p.patch
new file mode 100644
index 0000000..fe83826
--- /dev/null
@@ -0,0 +1,160 @@
+From 761a0eed3df6bc7e0f354df0ff0afddaca9a473b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jul 2021 20:41:23 +0100
+Subject: ASoC: component: Remove misplaced prefix handling in pin control
+ functions
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 31428c78748cafdd9352e1f622eb89bf453d9700 ]
+
+When the component level pin control functions were added they, for
+some no longer obvious reason, handled adding the name prefix to
+widget names. This meant that when the lack of prefix handling in the
+DAPM level pin operations was fixed by ae4fc532244b3bb4d ("ASoC: dapm:
+use component prefix when checking widget names") the one device using
+the component level API ended up with the prefix being applied twice,
+causing all lookups to fail.
+
+Fix this by removing the redundant prefixing from the component code,
+which has the nice side effect of also making that code much simpler.
+
+Reported-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Tested-by: Lucas Tanure <tanureal@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20210726194123.54585-1-broonie@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-component.c | 63 +++++++++++++++++----------------------
+ 1 file changed, 27 insertions(+), 36 deletions(-)
+
+diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
+index 728e93f35ffb..4295c0592901 100644
+--- a/sound/soc/soc-component.c
++++ b/sound/soc/soc-component.c
+@@ -135,86 +135,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component,
+       return soc_component_ret(component, ret);
+ }
+-static int soc_component_pin(struct snd_soc_component *component,
+-                           const char *pin,
+-                           int (*pin_func)(struct snd_soc_dapm_context *dapm,
+-                                           const char *pin))
+-{
+-      struct snd_soc_dapm_context *dapm =
+-              snd_soc_component_get_dapm(component);
+-      char *full_name;
+-      int ret;
+-
+-      if (!component->name_prefix) {
+-              ret = pin_func(dapm, pin);
+-              goto end;
+-      }
+-
+-      full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+-      if (!full_name) {
+-              ret = -ENOMEM;
+-              goto end;
+-      }
+-
+-      ret = pin_func(dapm, full_name);
+-      kfree(full_name);
+-end:
+-      return soc_component_ret(component, ret);
+-}
+-
+ int snd_soc_component_enable_pin(struct snd_soc_component *component,
+                                const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_enable_pin);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_enable_pin(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
+ int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
+                                         const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_enable_pin_unlocked);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
+ int snd_soc_component_disable_pin(struct snd_soc_component *component,
+                                 const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_disable_pin);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_disable_pin(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
+ int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
+                                          const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
+ int snd_soc_component_nc_pin(struct snd_soc_component *component,
+                            const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_nc_pin);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_nc_pin(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
+ int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
+                                     const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
+ int snd_soc_component_get_pin_status(struct snd_soc_component *component,
+                                    const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_get_pin_status(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
+ int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
+                                      const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_force_enable_pin(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
+@@ -222,7 +211,9 @@ int snd_soc_component_force_enable_pin_unlocked(
+       struct snd_soc_component *component,
+       const char *pin)
+ {
+-      return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked);
++      struct snd_soc_dapm_context *dapm =
++              snd_soc_component_get_dapm(component);
++      return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
+-- 
+2.30.2
+
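To make the double-prefix failure concrete: with a component name_prefix set, the removed soc_component_pin() helper built the prefixed widget name itself before calling into DAPM, and since ae4fc532244b3bb4d DAPM prefixes again. A hypothetical trace (the prefix and pin names are illustrative, not taken from the original report):

    /* component->name_prefix == "Left", caller asks for pin "Headphone" */
    snd_soc_component_enable_pin(component, "Headphone");
        /* old component helper: kasprintf() -> "Left Headphone" */
        snd_soc_dapm_enable_pin(dapm, "Left Headphone");
            /* DAPM applies the prefix again, so the widget lookup
             * runs for "Left Left Headphone" and never matches */

Removing the component-level prefixing leaves DAPM as the single place responsible for it, which is why the fixed wrappers can simply pass the bare pin name through.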
diff --git a/queue-5.10/asoc-rt5682-adjust-headset-volume-button-threshold.patch b/queue-5.10/asoc-rt5682-adjust-headset-volume-button-threshold.patch
new file mode 100644
index 0000000..5987426
--- /dev/null
@@ -0,0 +1,35 @@
+From 0969a2c3fb2bcc0c989009283da18a1a95b86387 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jul 2021 21:31:21 +0800
+Subject: ASoC: rt5682: Adjust headset volume button threshold
+
+From: Derek Fang <derek.fang@realtek.com>
+
+[ Upstream commit 6d20bf7c020f417fdef1810a22da17c126603472 ]
+
+Adjust the threshold of the headset volume+ button to fix
+the wrong button detection issue with some brands of headsets.
+
+Signed-off-by: Derek Fang <derek.fang@realtek.com>
+Link: https://lore.kernel.org/r/20210721133121.12333-1-derek.fang@realtek.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/rt5682.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
+index 2e41b8c169e5..0486b1469799 100644
+--- a/sound/soc/codecs/rt5682.c
++++ b/sound/soc/codecs/rt5682.c
+@@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = {
+       {RT5682_I2C_CTRL, 0x000f},
+       {RT5682_PLL2_INTERNAL, 0x8266},
+       {RT5682_SAR_IL_CMD_3, 0x8365},
++      {RT5682_SAR_IL_CMD_6, 0x0180},
+ };
+ void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
+-- 
+2.30.2
+
diff --git a/queue-5.10/blk-iocost-fix-lockdep-warning-on-blkcg-lock.patch b/queue-5.10/blk-iocost-fix-lockdep-warning-on-blkcg-lock.patch
new file mode 100644
index 0000000..09adc72
--- /dev/null
@@ -0,0 +1,71 @@
+From 3eee44afc2a36af67e5574bc215b532ceb97d903 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Aug 2021 15:06:08 +0800
+Subject: blk-iocost: fix lockdep warning on blkcg->lock
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 11431e26c9c43fa26f6b33ee1a90989f57b86024 ]
+
+blkcg->lock depends on q->queue_lock which may depend on another driver
+lock required in irq context, one example is dm-thin:
+
+       Chain exists of:
+         &pool->lock#3 --> &q->queue_lock --> &blkcg->lock
+
+        Possible interrupt unsafe locking scenario:
+
+              CPU0                    CPU1
+              ----                    ----
+         lock(&blkcg->lock);
+                                      local_irq_disable();
+                                      lock(&pool->lock#3);
+                                      lock(&q->queue_lock);
+         <Interrupt>
+           lock(&pool->lock#3);
+
+Fix the issue by using spin_lock_irq(&blkcg->lock) in ioc_weight_write().
+
+Cc: Tejun Heo <tj@kernel.org>
+Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
+Link: https://lore.kernel.org/linux-block/CA+QYu4rzz6079ighEanS3Qq_Dmnczcf45ZoJoHKVLVATTo1e4Q@mail.gmail.com/T/#u
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/20210803070608.1766400-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-iocost.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index b7d8a954d99c..e95b93f72bd5 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -3039,19 +3039,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
+               if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
+                       return -EINVAL;
+-              spin_lock(&blkcg->lock);
++              spin_lock_irq(&blkcg->lock);
+               iocc->dfl_weight = v * WEIGHT_ONE;
+               hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+                       struct ioc_gq *iocg = blkg_to_iocg(blkg);
+                       if (iocg) {
+-                              spin_lock_irq(&iocg->ioc->lock);
++                              spin_lock(&iocg->ioc->lock);
+                               ioc_now(iocg->ioc, &now);
+                               weight_updated(iocg, &now);
+-                              spin_unlock_irq(&iocg->ioc->lock);
++                              spin_unlock(&iocg->ioc->lock);
+                       }
+               }
+-              spin_unlock(&blkcg->lock);
++              spin_unlock_irq(&blkcg->lock);
+               return nbytes;
+       }
+-- 
+2.30.2
+
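The fix follows the usual rule for nested spinlocks that sit below irq-safe locks: disable interrupts at the outermost lock of the chain, and take inner locks with the plain variants since interrupts are already off. A minimal sketch of the before/after shape (not the driver code itself):

    /* before: blkcg->lock taken irq-unsafely, inner lock toggles irqs */
    spin_lock(&blkcg->lock);
    spin_lock_irq(&iocg->ioc->lock);
    spin_unlock_irq(&iocg->ioc->lock);   /* re-enables irqs too early */
    spin_unlock(&blkcg->lock);

    /* after: irqs stay off for the whole nested section */
    spin_lock_irq(&blkcg->lock);
    spin_lock(&iocg->ioc->lock);
    spin_unlock(&iocg->ioc->lock);
    spin_unlock_irq(&blkcg->lock);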
diff --git a/queue-5.10/bpf-fix-null-pointer-dereference-in-bpf_get_local_st.patch b/queue-5.10/bpf-fix-null-pointer-dereference-in-bpf_get_local_st.patch
new file mode 100644
index 0000000..b9c42e0
--- /dev/null
@@ -0,0 +1,265 @@
+From e0428f6128496efd05da5808aec6b13f23768f15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Aug 2021 10:36:46 -0700
+Subject: bpf: Fix NULL pointer dereference in bpf_get_local_storage() helper
+
+From: Yonghong Song <yhs@fb.com>
+
+commit b910eaaaa4b89976ef02e5d6448f3f73dc671d91 upstream.
+
+Jiri Olsa reported a bug ([1]) in the kernel where the cgroup local
+storage pointer may be NULL in the bpf_get_local_storage() helper.
+There are two issues uncovered by this bug:
+  (1). a kprobe or tracepoint prog incorrectly sets cgroup local storage
+       before the prog run,
+  (2). due to the change from preempt_disable to migrate_disable,
+       preemption is possible and percpu storage might be overwritten
+       by other tasks.
+
+Issue (1) is fixed in [2]. This patch addresses issue (2).
+The following shows how things can go wrong:
+  task 1:   bpf_cgroup_storage_set() for percpu local storage
+         preemption happens
+  task 2:   bpf_cgroup_storage_set() for percpu local storage
+         preemption happens
+  task 1:   run bpf program
+
+task 1 will effectively use the percpu local storage set by task 2,
+which will be either NULL or incorrect.
+
+Instead of just one common local storage per cpu, this patch fixes
+the issue by permitting 8 local storages per cpu, with each local
+storage identified by a task_struct pointer. This way, we allow
+at most 8 nested preemptions between bpf_cgroup_storage_set()
+and bpf_cgroup_storage_unset(). The percpu local storage slot
+is released (by calling bpf_cgroup_storage_unset()) by the same task
+after the bpf program finishes running.
+bpf_test_run() is also fixed to use the new bpf_cgroup_storage_set()
+interface.
+
+The patch is tested on top of [2] with reproducer in [1].
+Without this patch, the kernel will emit an error in 2-3 minutes.
+With this patch, after one hour, there is still no error.
+
+ [1] https://lore.kernel.org/bpf/CAKH8qBuXCfUz=w8L+Fj74OaUpbosO29niYwTki7e3Ag044_aww@mail.gmail.com/T
+ [2] https://lore.kernel.org/bpf/20210309185028.3763817-1-yhs@fb.com
+
+Signed-off-by: Yonghong Song <yhs@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Roman Gushchin <guro@fb.com>
+Link: https://lore.kernel.org/bpf/20210323055146.3334476-1-yhs@fb.com
+Cc: <stable@vger.kernel.org> # 5.10.x
+Signed-off-by: Stanislav Fomichev <sdf@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf-cgroup.h | 57 ++++++++++++++++++++++++++++++++------
+ include/linux/bpf.h        | 15 +++++++---
+ kernel/bpf/helpers.c       | 15 +++++++---
+ kernel/bpf/local_storage.c |  5 ++--
+ net/bpf/test_run.c         |  6 +++-
+ 5 files changed, 79 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index ed71bd1a0825..53f14e8827cc 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -20,14 +20,25 @@ struct bpf_sock_ops_kern;
+ struct bpf_cgroup_storage;
+ struct ctl_table;
+ struct ctl_table_header;
++struct task_struct;
+ #ifdef CONFIG_CGROUP_BPF
+ extern struct static_key_false cgroup_bpf_enabled_key;
+ #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+-DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+-              bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
++#define BPF_CGROUP_STORAGE_NEST_MAX   8
++
++struct bpf_cgroup_storage_info {
++      struct task_struct *task;
++      struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
++};
++
++/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
++ * to use bpf cgroup storage simultaneously.
++ */
++DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
++              bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+ #define for_each_cgroup_storage_type(stype) \
+       for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+@@ -156,13 +167,42 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
+       return BPF_CGROUP_STORAGE_SHARED;
+ }
+-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
+-                                        *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
++static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
++                                       *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
+ {
+       enum bpf_cgroup_storage_type stype;
++      int i, err = 0;
++
++      preempt_disable();
++      for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
++              if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
++                      continue;
++
++              this_cpu_write(bpf_cgroup_storage_info[i].task, current);
++              for_each_cgroup_storage_type(stype)
++                      this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
++                                     storage[stype]);
++              goto out;
++      }
++      err = -EBUSY;
++      WARN_ON_ONCE(1);
++
++out:
++      preempt_enable();
++      return err;
++}
++
++static inline void bpf_cgroup_storage_unset(void)
++{
++      int i;
++
++      for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
++              if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
++                      continue;
+-      for_each_cgroup_storage_type(stype)
+-              this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
++              this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
++              return;
++      }
+ }
+ struct bpf_cgroup_storage *
+@@ -410,8 +450,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+       return -EINVAL;
+ }
+-static inline void bpf_cgroup_storage_set(
+-      struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
++static inline int bpf_cgroup_storage_set(
++      struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
++static inline void bpf_cgroup_storage_unset(void) {}
+ static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
+                                           struct bpf_map *map) { return 0; }
+ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index c3ccb242d199..3f93a50c25ef 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1089,9 +1089,14 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
+                       goto _out;                      \
+               _item = &_array->items[0];              \
+               while ((_prog = READ_ONCE(_item->prog))) {              \
+-                      if (set_cg_storage)             \
+-                              bpf_cgroup_storage_set(_item->cgroup_storage);  \
+-                      _ret &= func(_prog, ctx);       \
++                      if (!set_cg_storage) {                  \
++                              _ret &= func(_prog, ctx);       \
++                      } else {                                \
++                              if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))    \
++                                      break;                  \
++                              _ret &= func(_prog, ctx);       \
++                              bpf_cgroup_storage_unset();     \
++                      }                               \
+                       _item++;                        \
+               }                                       \
+ _out:                                                 \
+@@ -1135,8 +1140,10 @@ _out:                                                   \
+               _array = rcu_dereference(array);        \
+               _item = &_array->items[0];              \
+               while ((_prog = READ_ONCE(_item->prog))) {              \
+-                      bpf_cgroup_storage_set(_item->cgroup_storage);  \
++                      if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))    \
++                              break;                  \
+                       ret = func(_prog, ctx);         \
++                      bpf_cgroup_storage_unset();     \
+                       _ret &= (ret & 1);              \
+                       _cn |= (ret & 2);               \
+                       _item++;                        \
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index f7e99bb8c3b6..3bd7fbd8c543 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -372,8 +372,8 @@ const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
+ };
+ #ifdef CONFIG_CGROUP_BPF
+-DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+-              bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
++DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
++              bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
+ {
+@@ -382,10 +382,17 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
+        * verifier checks that its value is correct.
+        */
+       enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+-      struct bpf_cgroup_storage *storage;
++      struct bpf_cgroup_storage *storage = NULL;
+       void *ptr;
++      int i;
+-      storage = this_cpu_read(bpf_cgroup_storage[stype]);
++      for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
++              if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
++                      continue;
++
++              storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
++              break;
++      }
+       if (stype == BPF_CGROUP_STORAGE_SHARED)
+               ptr = &READ_ONCE(storage->buf)->data[0];
+diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
+index 571bb351ed3b..b139247d2dd3 100644
+--- a/kernel/bpf/local_storage.c
++++ b/kernel/bpf/local_storage.c
+@@ -9,10 +9,11 @@
+ #include <linux/slab.h>
+ #include <uapi/linux/btf.h>
+-DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+-
+ #ifdef CONFIG_CGROUP_BPF
++DEFINE_PER_CPU(struct bpf_cgroup_storage_info,
++             bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
++
+ #include "../cgroup/cgroup-internal.h"
+ #define LOCAL_STORAGE_CREATE_FLAG_MASK                                        \
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index e7cbd1b4a5e5..72d424a5a142 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -42,13 +42,17 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+       migrate_disable();
+       time_start = ktime_get_ns();
+       for (i = 0; i < repeat; i++) {
+-              bpf_cgroup_storage_set(storage);
++              ret = bpf_cgroup_storage_set(storage);
++              if (ret)
++                      break;
+               if (xdp)
+                       *retval = bpf_prog_run_xdp(prog, ctx);
+               else
+                       *retval = BPF_PROG_RUN(prog, ctx);
++              bpf_cgroup_storage_unset();
++
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+-- 
+2.30.2
+
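The net effect is a claim/release protocol around every program run that uses cgroup storage, keyed by the current task rather than by the CPU alone. A schematic caller, mirroring the bpf_test_run() change above:

    if (bpf_cgroup_storage_set(storage))    /* claim one of 8 per-cpu slots */
            return -EBUSY;                  /* >8 nested claims on this cpu */
    ret = BPF_PROG_RUN(prog, ctx);          /* helper finds slot via current */
    bpf_cgroup_storage_unset();             /* release this task's slot */

Because each slot records the claiming task_struct, a task preempted between set and unset no longer has its storage pointer overwritten by whichever task runs next on the CPU; the newcomer claims a free slot instead.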
diff --git a/queue-5.10/net-mscc-fix-non-gpl-export-of-regmap-apis.patch b/queue-5.10/net-mscc-fix-non-gpl-export-of-regmap-apis.patch
new file mode 100644
index 0000000..6943283
--- /dev/null
@@ -0,0 +1,99 @@
+From b8f8d05b64609c7d6cbc17d402691eb7e615274b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Aug 2021 13:37:48 +0100
+Subject: net: mscc: Fix non-GPL export of regmap APIs
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 48c812e0327744b4965296f65c23fe2405692afc ]
+
+The ocelot driver makes use of regmap, wrapping it with driver specific
+operations that are thin wrappers around the core regmap APIs. These are
+exported with EXPORT_SYMBOL, dropping the _GPL from the core regmap
+exports which is frowned upon. Add _GPL suffixes to at least the APIs that
+are doing register I/O.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Acked-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mscc/ocelot_io.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
+index ea4e83410fe4..7390fa3980ec 100644
+--- a/drivers/net/ethernet/mscc/ocelot_io.c
++++ b/drivers/net/ethernet/mscc/ocelot_io.c
+@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
+                   ocelot->map[target][reg & REG_MASK] + offset, &val);
+       return val;
+ }
+-EXPORT_SYMBOL(__ocelot_read_ix);
++EXPORT_SYMBOL_GPL(__ocelot_read_ix);
+ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+ {
+@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+       regmap_write(ocelot->targets[target],
+                    ocelot->map[target][reg & REG_MASK] + offset, val);
+ }
+-EXPORT_SYMBOL(__ocelot_write_ix);
++EXPORT_SYMBOL_GPL(__ocelot_write_ix);
+ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+                    u32 offset)
+@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+                          ocelot->map[target][reg & REG_MASK] + offset,
+                          mask, val);
+ }
+-EXPORT_SYMBOL(__ocelot_rmw_ix);
++EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
+ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+ {
+@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+       regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
+       return val;
+ }
+-EXPORT_SYMBOL(ocelot_port_readl);
++EXPORT_SYMBOL_GPL(ocelot_port_readl);
+ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+ {
+@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+       regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
+ }
+-EXPORT_SYMBOL(ocelot_port_writel);
++EXPORT_SYMBOL_GPL(ocelot_port_writel);
+ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+ {
+@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+       ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+ }
+-EXPORT_SYMBOL(ocelot_port_rmwl);
++EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
+ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+                           u32 reg, u32 offset)
+@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
+       return 0;
+ }
+-EXPORT_SYMBOL(ocelot_regfields_init);
++EXPORT_SYMBOL_GPL(ocelot_regfields_init);
+ static struct regmap_config ocelot_regmap_config = {
+       .reg_bits       = 32,
+@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
+       return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
+ }
+-EXPORT_SYMBOL(ocelot_regmap_init);
++EXPORT_SYMBOL_GPL(ocelot_regmap_init);
+-- 
+2.30.2
+
diff --git a/queue-5.10/netfilter-conntrack-collect-all-entries-in-one-cycle.patch b/queue-5.10/netfilter-conntrack-collect-all-entries-in-one-cycle.patch
new file mode 100644
index 0000000..aa0cbd7
--- /dev/null
@@ -0,0 +1,184 @@
+From 9f216faeb369c888b39d41ec76c2b16836810432 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Jul 2021 00:29:19 +0200
+Subject: netfilter: conntrack: collect all entries in one cycle
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 4608fdfc07e116f9fc0895beb40abad7cdb5ee3d ]
+
+Michal Kubecek reports that conntrack gc is responsible for frequent
+wakeups (every 125ms) on idle systems.
+
+On busy systems, timed-out entries are evicted during lookup.
+The gc worker is only needed to remove entries after the system becomes
+idle following a busy period.
+
+To resolve this, always scan the entire table.
+If the scan is taking too long, reschedule so other work_structs can run,
+and resume from the next bucket.
+
+After a completed scan, wait for 2 minutes before the next cycle.
+Heuristics for faster re-schedule are removed.
+
+GC_SCAN_INTERVAL could be exposed as a sysctl in the future to allow
+tuning this as needed, or even turning the gc worker off.
+
+Reported-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 71 ++++++++++---------------------
+ 1 file changed, 22 insertions(+), 49 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index f9f2af26ccb3..54430a34d2f6 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+ struct conntrack_gc_work {
+       struct delayed_work     dwork;
+-      u32                     last_bucket;
++      u32                     next_bucket;
+       bool                    exiting;
+       bool                    early_drop;
+-      long                    next_gc_run;
+ };
+ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+ static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+ static __read_mostly bool nf_conntrack_locks_all;
+-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
+-#define GC_MAX_BUCKETS_DIV    128u
+-/* upper bound of full table scan */
+-#define GC_MAX_SCAN_JIFFIES   (16u * HZ)
+-/* desired ratio of entries found to be expired */
+-#define GC_EVICT_RATIO        50u
++#define GC_SCAN_INTERVAL      (120u * HZ)
++#define GC_SCAN_MAX_DURATION  msecs_to_jiffies(10)
+ static struct conntrack_gc_work conntrack_gc_work;
+@@ -1352,17 +1347,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
+ static void gc_worker(struct work_struct *work)
+ {
+-      unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
+-      unsigned int i, goal, buckets = 0, expired_count = 0;
+-      unsigned int nf_conntrack_max95 = 0;
++      unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
++      unsigned int i, hashsz, nf_conntrack_max95 = 0;
++      unsigned long next_run = GC_SCAN_INTERVAL;
+       struct conntrack_gc_work *gc_work;
+-      unsigned int ratio, scanned = 0;
+-      unsigned long next_run;
+-
+       gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+-      goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
+-      i = gc_work->last_bucket;
++      i = gc_work->next_bucket;
+       if (gc_work->early_drop)
+               nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+@@ -1370,22 +1361,21 @@ static void gc_worker(struct work_struct *work)
+               struct nf_conntrack_tuple_hash *h;
+               struct hlist_nulls_head *ct_hash;
+               struct hlist_nulls_node *n;
+-              unsigned int hashsz;
+               struct nf_conn *tmp;
+-              i++;
+               rcu_read_lock();
+               nf_conntrack_get_ht(&ct_hash, &hashsz);
+-              if (i >= hashsz)
+-                      i = 0;
++              if (i >= hashsz) {
++                      rcu_read_unlock();
++                      break;
++              }
+               hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+                       struct net *net;
+                       tmp = nf_ct_tuplehash_to_ctrack(h);
+-                      scanned++;
+                       if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+                               nf_ct_offload_timeout(tmp);
+                               continue;
+@@ -1393,7 +1383,6 @@ static void gc_worker(struct work_struct *work)
+                       if (nf_ct_is_expired(tmp)) {
+                               nf_ct_gc_expired(tmp);
+-                              expired_count++;
+                               continue;
+                       }
+@@ -1425,7 +1414,14 @@ static void gc_worker(struct work_struct *work)
+                */
+               rcu_read_unlock();
+               cond_resched();
+-      } while (++buckets < goal);
++              i++;
++
++              if (time_after(jiffies, end_time) && i < hashsz) {
++                      gc_work->next_bucket = i;
++                      next_run = 0;
++                      break;
++              }
++      } while (i < hashsz);
+       if (gc_work->exiting)
+               return;
+@@ -1436,40 +1432,17 @@ static void gc_worker(struct work_struct *work)
+        *
+        * This worker is only here to reap expired entries when system went
+        * idle after a busy period.
+-       *
+-       * The heuristics below are supposed to balance conflicting goals:
+-       *
+-       * 1. Minimize time until we notice a stale entry
+-       * 2. Maximize scan intervals to not waste cycles
+-       *
+-       * Normally, expire ratio will be close to 0.
+-       *
+-       * As soon as a sizeable fraction of the entries have expired
+-       * increase scan frequency.
+        */
+-      ratio = scanned ? expired_count * 100 / scanned : 0;
+-      if (ratio > GC_EVICT_RATIO) {
+-              gc_work->next_gc_run = min_interval;
+-      } else {
+-              unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
+-
+-              BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+-
+-              gc_work->next_gc_run += min_interval;
+-              if (gc_work->next_gc_run > max)
+-                      gc_work->next_gc_run = max;
++      if (next_run) {
++              gc_work->early_drop = false;
++              gc_work->next_bucket = 0;
+       }
+-
+-      next_run = gc_work->next_gc_run;
+-      gc_work->last_bucket = i;
+-      gc_work->early_drop = false;
+       queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+ }
+ static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+ {
+       INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+-      gc_work->next_gc_run = HZ;
+       gc_work->exiting = false;
+ }
+-- 
+2.30.2
+
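The reworked worker is a standard time-budgeted, resumable scan: process buckets until either the table is done or the budget expires, record the resume point, and requeue immediately (delay 0) to continue, or with the long interval once a full pass completes. An abstracted control-flow sketch (scan_bucket() stands in for the per-bucket eviction loop, and gc_work for the container_of() lookup):

    static void gc_worker_sketch(struct work_struct *work)
    {
            unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
            unsigned long next_run = GC_SCAN_INTERVAL;
            unsigned int i = gc_work->next_bucket;

            do {
                    scan_bucket(i);
                    i++;
                    if (time_after(jiffies, end_time) && i < hashsz) {
                            gc_work->next_bucket = i;  /* resume here */
                            next_run = 0;              /* requeue at once */
                            break;
                    }
            } while (i < hashsz);

            if (next_run)                              /* full pass done */
                    gc_work->next_bucket = 0;
            queue_delayed_work(system_power_efficient_wq,
                               &gc_work->dwork, next_run);
    }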
diff --git a/queue-5.10/once-fix-panic-when-module-unload.patch b/queue-5.10/once-fix-panic-when-module-unload.patch
new file mode 100644
index 0000000..6f3657e
--- /dev/null
@@ -0,0 +1,123 @@
+From 4c78e91b46fe005feb2378c4d7a3b98967e58e46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Aug 2021 16:21:24 +0800
+Subject: once: Fix panic when module unload
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+[ Upstream commit 1027b96ec9d34f9abab69bc1a4dc5b1ad8ab1349 ]
+
+DO_ONCE
+DEFINE_STATIC_KEY_TRUE(___once_key);
+__do_once_done
+  once_disable_jump(once_key);
+    INIT_WORK(&w->work, once_deferred);
+    struct once_work *w;
+    w->key = key;
+    schedule_work(&w->work);                     module unload
+                                                 //*the key is destroyed*
+process_one_work
+  once_deferred
+    BUG_ON(!static_key_enabled(work->key));
+       static_key_count((struct static_key *)x)    //*access key, crash*
+
+When a module uses the DO_ONCE mechanism, it could crash due to the
+above concurrency problem; we could reproduce it with the link in [1].
+
+Fix it by taking/putting a module refcount in the once work process.
+
+[1] https://lore.kernel.org/netdev/eaa6c371-465e-57eb-6be9-f4b16b9d7cbf@huawei.com/
+
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Eric Dumazet <edumazet@google.com>
+Reported-by: Minmin chen <chenmingmin@huawei.com>
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/once.h |  4 ++--
+ lib/once.c           | 11 ++++++++---
+ 2 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/once.h b/include/linux/once.h
+index 9225ee6d96c7..ae6f4eb41cbe 100644
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -7,7 +7,7 @@
+ bool __do_once_start(bool *done, unsigned long *flags);
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-                  unsigned long *flags);
++                  unsigned long *flags, struct module *mod);
+ /* Call a function exactly once. The idea of DO_ONCE() is to perform
+  * a function call such as initialization of random seeds, etc, only
+@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
+                       if (unlikely(___ret)) {                              \
+                               func(__VA_ARGS__);                           \
+                               __do_once_done(&___done, &___once_key,       \
+-                                             &___flags);                   \
++                                             &___flags, THIS_MODULE);      \
+                       }                                                    \
+               }                                                            \
+               ___ret;                                                      \
+diff --git a/lib/once.c b/lib/once.c
+index 8b7d6235217e..59149bf3bfb4 100644
+--- a/lib/once.c
++++ b/lib/once.c
+@@ -3,10 +3,12 @@
+ #include <linux/spinlock.h>
+ #include <linux/once.h>
+ #include <linux/random.h>
++#include <linux/module.h>
+ struct once_work {
+       struct work_struct work;
+       struct static_key_true *key;
++      struct module *module;
+ };
+ static void once_deferred(struct work_struct *w)
+@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
+       work = container_of(w, struct once_work, work);
+       BUG_ON(!static_key_enabled(work->key));
+       static_branch_disable(work->key);
++      module_put(work->module);
+       kfree(work);
+ }
+-static void once_disable_jump(struct static_key_true *key)
++static void once_disable_jump(struct static_key_true *key, struct module *mod)
+ {
+       struct once_work *w;
+@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
+       INIT_WORK(&w->work, once_deferred);
+       w->key = key;
++      w->module = mod;
++      __module_get(mod);
+       schedule_work(&w->work);
+ }
+@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
+ EXPORT_SYMBOL(__do_once_start);
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-                  unsigned long *flags)
++                  unsigned long *flags, struct module *mod)
+       __releases(once_lock)
+ {
+       *done = true;
+       spin_unlock_irqrestore(&once_lock, *flags);
+-      once_disable_jump(once_key);
++      once_disable_jump(once_key, mod);
+ }
+ EXPORT_SYMBOL(__do_once_done);
+-- 
+2.30.2
+
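The fix is the standard recipe for deferred work that dereferences module-owned data: pin the module before queueing the work and drop the reference from the work handler after the last access, so the module cannot be unloaded while the work is pending. The pattern in miniature (a generic sketch, not the once.c code verbatim):

    struct my_work {
            struct work_struct work;
            struct module *module;
    };

    static void my_work_fn(struct work_struct *work)
    {
            struct my_work *w = container_of(work, struct my_work, work);

            /* ... safely touch data owned by w->module ... */
            module_put(w->module);   /* unpin only after the last access */
            kfree(w);
    }

    static void my_defer(struct module *mod)   /* mod known alive here */
    {
            struct my_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

            if (!w)
                    return;
            INIT_WORK(&w->work, my_work_fn);
            w->module = mod;
            __module_get(mod);                 /* pin across the async gap */
            schedule_work(&w->work);
    }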
diff --git a/queue-5.10/ovl-fix-uninitialized-pointer-read-in-ovl_lookup_rea.patch b/queue-5.10/ovl-fix-uninitialized-pointer-read-in-ovl_lookup_rea.patch
new file mode 100644
index 0000000..6edf03d
--- /dev/null
@@ -0,0 +1,45 @@
+From 0d114f066222e96c7c60ac4635852e9144ab81ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Aug 2021 10:03:12 +0200
+Subject: ovl: fix uninitialized pointer read in ovl_lookup_real_one()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+[ Upstream commit 580c610429b3994e8db24418927747cf28443cde ]
+
+One error path can result in release_dentry_name_snapshot() being called
+before "name" was initialized by take_dentry_name_snapshot().
+
+Fix by moving release_dentry_name_snapshot() to immediately after the
+only use.
+
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/overlayfs/export.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index ed35be3fafc6..f469982dcb36 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -390,6 +390,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
+        */
+       take_dentry_name_snapshot(&name, real);
+       this = lookup_one_len(name.name.name, connected, name.name.len);
++      release_dentry_name_snapshot(&name);
+       err = PTR_ERR(this);
+       if (IS_ERR(this)) {
+               goto fail;
+@@ -404,7 +405,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
+       }
+ out:
+-      release_dentry_name_snapshot(&name);
+       dput(parent);
+       inode_unlock(dir);
+       return this;
+-- 
+2.30.2
+
diff --git a/queue-5.10/series b/queue-5.10/series
index 737806f2afb2e8fff5d3719aae83494135df8232..a0df97279685367d099b6d5b637fabecce41fe4e 100644
@@ -1,2 +1,11 @@
 net-qrtr-fix-another-oob-read-in-qrtr_endpoint_post.patch
 bpf-fix-ringbuf-helper-function-compatibility.patch
+bpf-fix-null-pointer-dereference-in-bpf_get_local_st.patch
+asoc-rt5682-adjust-headset-volume-button-threshold.patch
+asoc-component-remove-misplaced-prefix-handling-in-p.patch
+arc-fix-config_stackdepot.patch
+netfilter-conntrack-collect-all-entries-in-one-cycle.patch
+once-fix-panic-when-module-unload.patch
+blk-iocost-fix-lockdep-warning-on-blkcg-lock.patch
+ovl-fix-uninitialized-pointer-read-in-ovl_lookup_rea.patch
+net-mscc-fix-non-gpl-export-of-regmap-apis.patch