git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.4
author     Sasha Levin <sashal@kernel.org>
           Sat, 26 Aug 2023 13:50:17 +0000 (09:50 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Sat, 26 Aug 2023 13:50:17 +0000 (09:50 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
37 files changed:
queue-5.4/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch [new file with mode: 0644]
queue-5.4/alsa-pcm-set-per-card-upper-limit-of-pcm-buffer-allo.patch [new file with mode: 0644]
queue-5.4/alsa-pcm-use-sg-buffer-only-when-direct-dma-is-avail.patch [new file with mode: 0644]
queue-5.4/asoc-fsl_sai-add-new-added-registers-and-new-bit-def.patch [new file with mode: 0644]
queue-5.4/asoc-fsl_sai-disable-bit-clock-with-transmitter.patch [new file with mode: 0644]
queue-5.4/asoc-fsl_sai-refine-enable-disable-te-re-sequence-in.patch [new file with mode: 0644]
queue-5.4/bonding-fix-macvlan-over-alb-bond-support.patch [new file with mode: 0644]
queue-5.4/dccp-annotate-data-races-in-dccp_poll.patch [new file with mode: 0644]
queue-5.4/dlm-improve-plock-logging-if-interrupted.patch [new file with mode: 0644]
queue-5.4/dlm-replace-usage-of-found-with-dedicated-list-itera.patch [new file with mode: 0644]
queue-5.4/dm-integrity-increase-recalc_sectors-to-improve-reca.patch [new file with mode: 0644]
queue-5.4/dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-check-tg-is-non-null-before-checking.patch [new file with mode: 0644]
queue-5.4/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch [new file with mode: 0644]
queue-5.4/fbdev-fix-potential-oob-read-in-fast_imageblit.patch [new file with mode: 0644]
queue-5.4/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch [new file with mode: 0644]
queue-5.4/fbdev-improve-performance-of-sys_imageblit.patch [new file with mode: 0644]
queue-5.4/fs-dlm-add-pid-to-debug-log.patch [new file with mode: 0644]
queue-5.4/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch [new file with mode: 0644]
queue-5.4/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch [new file with mode: 0644]
queue-5.4/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch [new file with mode: 0644]
queue-5.4/igb-avoid-starting-unnecessary-workqueues.patch [new file with mode: 0644]
queue-5.4/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch [new file with mode: 0644]
queue-5.4/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch [new file with mode: 0644]
queue-5.4/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch [new file with mode: 0644]
queue-5.4/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch [new file with mode: 0644]
queue-5.4/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch [new file with mode: 0644]
queue-5.4/net-remove-bond_slave_has_mac_rcu.patch [new file with mode: 0644]
queue-5.4/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch [new file with mode: 0644]
queue-5.4/net-validate-veth-and-vxcan-peer-ifindexes.patch [new file with mode: 0644]
queue-5.4/octeontx2-af-sdp-fix-receive-link-config.patch [new file with mode: 0644]
queue-5.4/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch [new file with mode: 0644]
queue-5.4/regmap-account-for-register-length-in-smbus-i-o-limi.patch [new file with mode: 0644]
queue-5.4/regmap-i2c-add-16-bit-width-registers-support.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/sock-annotate-data-races-around-prot-memory_pressure.patch [new file with mode: 0644]
queue-5.4/tracing-fix-memleak-due-to-race-between-current_trac.patch [new file with mode: 0644]

diff --git a/queue-5.4/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch b/queue-5.4/alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch
new file mode 100644 (file)
index 0000000..2a90fd6
--- /dev/null
@@ -0,0 +1,115 @@
+From 1e1e845af20e7af462dac1f6ca891de24b59c6d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jul 2023 13:24:30 +0200
+Subject: ALSA: pcm: Fix potential data race at PCM memory allocation helpers
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit bd55842ed998a622ba6611fe59b3358c9f76773d ]
+
+The PCM memory allocation helpers have a sanity check against too many
+buffer allocations.  However, the check is performed without a proper
+lock and the allocation isn't serialized; this allows the user to
+allocate more memory than the predefined max size.
+
+In practice, this isn't really a big problem, as it's more or less a
+"soft limit" sanity check, and it's not possible to allocate without
+limit.  But it's still better to address this for more consistent
+behavior.
+
+The patch covers the size check in do_alloc_pages() with the
+card->memory_mutex, and increases the allocated size there to
+prevent further overflow.  When the actual allocation fails,
+the size is decreased accordingly.
+
+Reported-by: BassCheck <bass@buaa.edu.cn>
+Reported-by: Tuo Li <islituo@gmail.com>
+Link: https://lore.kernel.org/r/CADm8Tek6t0WedK+3Y6rbE5YEt19tML8BUL45N2ji4ZAz1KcN_A@mail.gmail.com
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230703112430.30634-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/pcm_memory.c | 44 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 36 insertions(+), 8 deletions(-)
+
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 97b471d7b32e5..beca39f7c8f35 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -31,14 +31,40 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
+ module_param(max_alloc_per_card, ulong, 0644);
+ MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
++static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++      card->total_pcm_alloc_bytes += bytes;
++}
++
++static void update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++      mutex_lock(&card->memory_mutex);
++      __update_allocated_size(card, bytes);
++      mutex_unlock(&card->memory_mutex);
++}
++
++static void decrease_allocated_size(struct snd_card *card, size_t bytes)
++{
++      mutex_lock(&card->memory_mutex);
++      WARN_ON(card->total_pcm_alloc_bytes < bytes);
++      __update_allocated_size(card, -(ssize_t)bytes);
++      mutex_unlock(&card->memory_mutex);
++}
++
+ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+                         size_t size, struct snd_dma_buffer *dmab)
+ {
+       int err;
++      /* check and reserve the requested size */
++      mutex_lock(&card->memory_mutex);
+       if (max_alloc_per_card &&
+-          card->total_pcm_alloc_bytes + size > max_alloc_per_card)
++          card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
++              mutex_unlock(&card->memory_mutex);
+               return -ENOMEM;
++      }
++      __update_allocated_size(card, size);
++      mutex_unlock(&card->memory_mutex);
+       if (IS_ENABLED(CONFIG_SND_DMA_SGBUF) &&
+           (type == SNDRV_DMA_TYPE_DEV_SG || type == SNDRV_DMA_TYPE_DEV_UC_SG) &&
+@@ -53,9 +79,14 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+       err = snd_dma_alloc_pages(type, dev, size, dmab);
+       if (!err) {
+-              mutex_lock(&card->memory_mutex);
+-              card->total_pcm_alloc_bytes += dmab->bytes;
+-              mutex_unlock(&card->memory_mutex);
++              /* the actual allocation size might be bigger than requested,
++               * and we need to correct the account
++               */
++              if (dmab->bytes != size)
++                      update_allocated_size(card, dmab->bytes - size);
++      } else {
++              /* take back on allocation failure */
++              decrease_allocated_size(card, size);
+       }
+       return err;
+ }
+@@ -64,10 +95,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
+ {
+       if (!dmab->area)
+               return;
+-      mutex_lock(&card->memory_mutex);
+-      WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
+-      card->total_pcm_alloc_bytes -= dmab->bytes;
+-      mutex_unlock(&card->memory_mutex);
++      decrease_allocated_size(card, dmab->bytes);
+       snd_dma_free_pages(dmab);
+       dmab->area = NULL;
+ }
+-- 
+2.40.1
+
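The race fixed above comes down to a classic check-and-reserve problem: the
limit test and the accounting update must happen under one lock, and a
reservation made before the real allocation has to be rolled back if that
allocation later fails.  Below is a minimal, self-contained userspace sketch
of the same pattern; the names, sizes and pthread locking are illustrative
stand-ins, not the kernel code itself.

/* check-and-reserve with rollback, mirroring the shape of do_alloc_pages() */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t total_alloc_bytes;
static const size_t max_alloc_bytes = 32UL * 1024 * 1024;

static void *alloc_accounted(size_t size)
{
        void *buf;

        /* check and reserve the requested size under the same lock */
        pthread_mutex_lock(&mem_lock);
        if (total_alloc_bytes + size > max_alloc_bytes) {
                pthread_mutex_unlock(&mem_lock);
                return NULL;
        }
        total_alloc_bytes += size;
        pthread_mutex_unlock(&mem_lock);

        buf = malloc(size);
        if (!buf) {
                /* take the reservation back on allocation failure */
                pthread_mutex_lock(&mem_lock);
                total_alloc_bytes -= size;
                pthread_mutex_unlock(&mem_lock);
        }
        return buf;
}

int main(void)
{
        void *p = alloc_accounted(1024 * 1024);

        printf("allocation %s, %zu bytes accounted\n",
               p ? "succeeded" : "failed", total_alloc_bytes);
        free(p);
        return 0;
}

The patch applies the same shape to do_alloc_pages(): reserve under
card->memory_mutex before allocating, correct the account when dmab->bytes
differs from the requested size, and release the reservation on failure.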
diff --git a/queue-5.4/alsa-pcm-set-per-card-upper-limit-of-pcm-buffer-allo.patch b/queue-5.4/alsa-pcm-set-per-card-upper-limit-of-pcm-buffer-allo.patch
new file mode 100644 (file)
index 0000000..8797bcb
--- /dev/null
@@ -0,0 +1,218 @@
+From 9e7c0dcb97477a582cdc732a15cf065dba7331a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jan 2020 13:44:22 +0100
+Subject: ALSA: pcm: Set per-card upper limit of PCM buffer allocations
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit d4cfb30fce03093ad944e0b44bd8f40bdad5330e ]
+
+Currently, the available buffer allocation size for a PCM stream
+depends on the preallocated size; when a buffer has been preallocated,
+the max buffer size is set to that size, so that applications won't
+re-allocate too much memory.  OTOH, when no preallocation is done,
+each substream may allocate an arbitrary buffer size as long as
+snd_pcm_hardware.buffer_bytes_max allows -- which can be quite high;
+HD-audio sets 1GB there.
+
+It means that the system may consume a high amount of pages for PCM
+buffers, and they are pinned and never swapped out.  This can lead to
+OOM easily.
+
+To avoid such a situation, this patch adds an upper limit per card.
+Each snd_pcm_lib_malloc_pages() and _free_pages() call is tracked,
+and an error is returned if the total amount of buffers goes over
+the defined upper limit.  The default value is set to 32MB, which
+should be large enough for usual operations.
+
+If larger buffers are needed for any specific usage, the limit can be
+adjusted (also dynamically) via the snd_pcm.max_alloc_per_card option.
+Setting it to zero means no check is performed, and again, an
+unlimited amount of buffers is allowed.
+
+Link: https://lore.kernel.org/r/20200120124423.11862-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: bd55842ed998 ("ALSA: pcm: Fix potential data race at PCM memory allocation helpers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/sound/core.h    |  3 ++
+ sound/core/init.c       |  1 +
+ sound/core/pcm_memory.c | 69 ++++++++++++++++++++++++++++++-----------
+ 3 files changed, 55 insertions(+), 18 deletions(-)
+
+diff --git a/include/sound/core.h b/include/sound/core.h
+index 8a80121811d94..e4b24dcb4b190 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -119,6 +119,9 @@ struct snd_card {
+       bool registered;                /* card_dev is registered? */
+       wait_queue_head_t remove_sleep;
++      size_t total_pcm_alloc_bytes;   /* total amount of allocated buffers */
++      struct mutex memory_mutex;      /* protection for the above */
++
+ #ifdef CONFIG_PM
+       unsigned int power_state;       /* power state */
+       wait_queue_head_t power_sleep;
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 45bbc4884ef0f..a127763ae5fbd 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -211,6 +211,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
+       INIT_LIST_HEAD(&card->ctl_files);
+       spin_lock_init(&card->files_lock);
+       INIT_LIST_HEAD(&card->files_list);
++      mutex_init(&card->memory_mutex);
+ #ifdef CONFIG_PM
+       init_waitqueue_head(&card->power_sleep);
+ #endif
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 9aea1d6fb0547..94bd5de01a4d0 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -26,6 +26,38 @@ MODULE_PARM_DESC(maximum_substreams, "Maximum substreams with preallocated DMA m
+ static const size_t snd_minimum_buffer = 16384;
++static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
++module_param(max_alloc_per_card, ulong, 0644);
++MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
++
++static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
++                        size_t size, struct snd_dma_buffer *dmab)
++{
++      int err;
++
++      if (max_alloc_per_card &&
++          card->total_pcm_alloc_bytes + size > max_alloc_per_card)
++              return -ENOMEM;
++      err = snd_dma_alloc_pages(type, dev, size, dmab);
++      if (!err) {
++              mutex_lock(&card->memory_mutex);
++              card->total_pcm_alloc_bytes += dmab->bytes;
++              mutex_unlock(&card->memory_mutex);
++      }
++      return err;
++}
++
++static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
++{
++      if (!dmab->area)
++              return;
++      mutex_lock(&card->memory_mutex);
++      WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
++      card->total_pcm_alloc_bytes -= dmab->bytes;
++      mutex_unlock(&card->memory_mutex);
++      snd_dma_free_pages(dmab);
++      dmab->area = NULL;
++}
+ /*
+  * try to allocate as the large pages as possible.
+@@ -36,16 +68,15 @@ static const size_t snd_minimum_buffer = 16384;
+ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t size)
+ {
+       struct snd_dma_buffer *dmab = &substream->dma_buffer;
++      struct snd_card *card = substream->pcm->card;
+       size_t orig_size = size;
+       int err;
+       do {
+-              if ((err = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev,
+-                                             size, dmab)) < 0) {
+-                      if (err != -ENOMEM)
+-                              return err; /* fatal error */
+-              } else
+-                      return 0;
++              err = do_alloc_pages(card, dmab->dev.type, dmab->dev.dev,
++                                   size, dmab);
++              if (err != -ENOMEM)
++                      return err;
+               size >>= 1;
+       } while (size >= snd_minimum_buffer);
+       dmab->bytes = 0; /* tell error */
+@@ -61,10 +92,7 @@ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t siz
+  */
+ static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream)
+ {
+-      if (substream->dma_buffer.area == NULL)
+-              return;
+-      snd_dma_free_pages(&substream->dma_buffer);
+-      substream->dma_buffer.area = NULL;
++      do_free_pages(substream->pcm->card, &substream->dma_buffer);
+ }
+ /**
+@@ -129,6 +157,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+                                              struct snd_info_buffer *buffer)
+ {
+       struct snd_pcm_substream *substream = entry->private_data;
++      struct snd_card *card = substream->pcm->card;
+       char line[64], str[64];
+       size_t size;
+       struct snd_dma_buffer new_dmab;
+@@ -150,9 +179,10 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+               memset(&new_dmab, 0, sizeof(new_dmab));
+               new_dmab.dev = substream->dma_buffer.dev;
+               if (size > 0) {
+-                      if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
+-                                              substream->dma_buffer.dev.dev,
+-                                              size, &new_dmab) < 0) {
++                      if (do_alloc_pages(card,
++                                         substream->dma_buffer.dev.type,
++                                         substream->dma_buffer.dev.dev,
++                                         size, &new_dmab) < 0) {
+                               buffer->error = -ENOMEM;
+                               goto unlock;
+                       }
+@@ -161,7 +191,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+                       substream->buffer_bytes_max = UINT_MAX;
+               }
+               if (substream->dma_buffer.area)
+-                      snd_dma_free_pages(&substream->dma_buffer);
++                      do_free_pages(card, &substream->dma_buffer);
+               substream->dma_buffer = new_dmab;
+       } else {
+               buffer->error = -EINVAL;
+@@ -289,6 +319,7 @@ EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
+  */
+ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
+ {
++      struct snd_card *card = substream->pcm->card;
+       struct snd_pcm_runtime *runtime;
+       struct snd_dma_buffer *dmab = NULL;
+@@ -317,9 +348,10 @@ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
+               if (! dmab)
+                       return -ENOMEM;
+               dmab->dev = substream->dma_buffer.dev;
+-              if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
+-                                      substream->dma_buffer.dev.dev,
+-                                      size, dmab) < 0) {
++              if (do_alloc_pages(card,
++                                 substream->dma_buffer.dev.type,
++                                 substream->dma_buffer.dev.dev,
++                                 size, dmab) < 0) {
+                       kfree(dmab);
+                       return -ENOMEM;
+               }
+@@ -340,6 +372,7 @@ EXPORT_SYMBOL(snd_pcm_lib_malloc_pages);
+  */
+ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
+ {
++      struct snd_card *card = substream->pcm->card;
+       struct snd_pcm_runtime *runtime;
+       if (PCM_RUNTIME_CHECK(substream))
+@@ -349,7 +382,7 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
+               return 0;
+       if (runtime->dma_buffer_p != &substream->dma_buffer) {
+               /* it's a newly allocated buffer.  release it now. */
+-              snd_dma_free_pages(runtime->dma_buffer_p);
++              do_free_pages(card, runtime->dma_buffer_p);
+               kfree(runtime->dma_buffer_p);
+       }
+       snd_pcm_set_runtime_buffer(substream, NULL);
+-- 
+2.40.1
+
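Alongside the hard cap, the patch above keeps preallocation usable by retrying
with smaller sizes: preallocate_pcm_pages() halves the request on -ENOMEM until
it either fits under the budget or falls below a minimum.  The sketch below
restates that retry policy in plain C; try_alloc() and the 1 MiB pretend budget
are assumptions made for the example, not kernel helpers.

/* halve-and-retry preallocation, mirroring preallocate_pcm_pages() above */
#include <errno.h>
#include <stdio.h>

#define MIN_BUFFER_BYTES 16384          /* analogous to snd_minimum_buffer */

static int try_alloc(size_t size)
{
        /* pretend anything above 1 MiB exceeds the per-card budget */
        return size > 1024 * 1024 ? -ENOMEM : 0;
}

static size_t preallocate(size_t size)
{
        do {
                if (try_alloc(size) == 0)
                        return size;    /* succeeded at this size */
                size >>= 1;             /* halve and retry on -ENOMEM */
        } while (size >= MIN_BUFFER_BYTES);

        return 0;                       /* nothing could be preallocated */
}

int main(void)
{
        printf("preallocated %zu bytes\n", preallocate(8UL * 1024 * 1024));
        return 0;
}

The budget itself is the max_alloc_per_card module parameter added above;
since it is registered with mode 0644 it can also be changed at runtime,
typically under /sys/module/snd_pcm/parameters/max_alloc_per_card.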
diff --git a/queue-5.4/alsa-pcm-use-sg-buffer-only-when-direct-dma-is-avail.patch b/queue-5.4/alsa-pcm-use-sg-buffer-only-when-direct-dma-is-avail.patch
new file mode 100644 (file)
index 0000000..1744473
--- /dev/null
@@ -0,0 +1,68 @@
+From e4cacb4efafe2e3f23e555b5a056811d0dfd0ab6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 18:00:45 +0200
+Subject: ALSA: pcm: Use SG-buffer only when direct DMA is available
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 3ad796cbc36a7bc8bfd4de191d791b9490bc112b ]
+
+The DMA-coherent SG-buffer is tricky to use, as it does need the
+mapping.  It used to work stably on x86 for years (and that's why we
+had enabled the SG-buffer solely on x86) with the default mmap handler
+and vmap(), but our luck doesn't last forever.  The chance of breakage
+is high when special DMA handling is introduced on the arch side.
+
+In this patch, we change the buffer allocation to use the SG-buffer
+only when the device in question uses direct DMA.  It's a bit
+hackish, but it's currently the only condition that may work (more or
+less) reliably with the default mmap and vmap() for mapping the pages
+that are deduced via virt_to_page().
+
+In theory, we could apply a similar hack in the sound/core memory
+allocation helper, too; but it's used by SOF for allocating SG pages
+without re-mapping via vmap() or mmap, and it's fine to use it in that
+way, so let's keep it and add the workaround on the PCM side.
+
+Link: https://lore.kernel.org/r/20200615160045.2703-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: bd55842ed998 ("ALSA: pcm: Fix potential data race at PCM memory allocation helpers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/pcm_memory.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 94bd5de01a4d0..97b471d7b32e5 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -11,6 +11,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/vmalloc.h>
+ #include <linux/export.h>
++#include <linux/dma-mapping.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/info.h>
+@@ -38,6 +39,18 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+       if (max_alloc_per_card &&
+           card->total_pcm_alloc_bytes + size > max_alloc_per_card)
+               return -ENOMEM;
++
++      if (IS_ENABLED(CONFIG_SND_DMA_SGBUF) &&
++          (type == SNDRV_DMA_TYPE_DEV_SG || type == SNDRV_DMA_TYPE_DEV_UC_SG) &&
++          !dma_is_direct(get_dma_ops(dev))) {
++              /* mutate to continuous page allocation */
++              dev_dbg(dev, "Use continuous page allocator\n");
++              if (type == SNDRV_DMA_TYPE_DEV_SG)
++                      type = SNDRV_DMA_TYPE_DEV;
++              else
++                      type = SNDRV_DMA_TYPE_DEV_UC;
++      }
++
+       err = snd_dma_alloc_pages(type, dev, size, dmab);
+       if (!err) {
+               mutex_lock(&card->memory_mutex);
+-- 
+2.40.1
+
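The guard added above does not fail the allocation when direct DMA is missing;
it silently downgrades the SG buffer types to their contiguous equivalents.
Restated as a standalone helper for readability (a sketch only -- the actual
patch performs this check inline in do_alloc_pages()):

/* map an SG buffer type to its contiguous fallback when the device lacks
 * direct DMA; relies on <linux/dma-mapping.h> and <sound/memalloc.h>
 */
static int pick_buffer_type(int type, struct device *dev)
{
        if (!IS_ENABLED(CONFIG_SND_DMA_SGBUF))
                return type;
        if (type != SNDRV_DMA_TYPE_DEV_SG && type != SNDRV_DMA_TYPE_DEV_UC_SG)
                return type;
        if (dma_is_direct(get_dma_ops(dev)))
                return type;            /* direct DMA: SG mapping stays safe */

        /* otherwise mutate to a continuous page allocation */
        return type == SNDRV_DMA_TYPE_DEV_SG ? SNDRV_DMA_TYPE_DEV
                                             : SNDRV_DMA_TYPE_DEV_UC;
}

Callers are unaffected by the downgrade: snd_dma_alloc_pages() simply receives
the adjusted type, which is why the change can sit entirely inside
do_alloc_pages().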
diff --git a/queue-5.4/asoc-fsl_sai-add-new-added-registers-and-new-bit-def.patch b/queue-5.4/asoc-fsl_sai-add-new-added-registers-and-new-bit-def.patch
new file mode 100644 (file)
index 0000000..16117c6
--- /dev/null
@@ -0,0 +1,202 @@
+From 986abc452fd5412c068820f28bbf9ddd51318790 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Sep 2020 14:11:17 +0800
+Subject: ASoC: fsl_sai: Add new added registers and new bit definition
+
+From: Shengjiu Wang <shengjiu.wang@nxp.com>
+
+[ Upstream commit 0b2cbce6898600aae5e87285f1c2000162d59c76 ]
+
+On the i.MX8MQ/i.MX8MN/i.MX8MM platforms, the SAI IP is upgraded.
+There are some new registers and new bit definitions. This
+patch completes the register list.
+
+Signed-off-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Acked-by: Nicolin Chen <nicoleotsuka@gmail.com>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Link: https://lore.kernel.org/r/1600323079-5317-2-git-send-email-shengjiu.wang@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 269f399dc19f ("ASoC: fsl_sai: Disable bit clock with transmitter")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_sai.c | 23 ++++++++++++++++
+ sound/soc/fsl/fsl_sai.h | 59 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 82 insertions(+)
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 23f0b5ee000c3..ebca0778d3f57 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -810,6 +810,8 @@ static struct reg_default fsl_sai_reg_defaults_ofs8[] = {
+       {FSL_SAI_RCR4(8), 0},
+       {FSL_SAI_RCR5(8), 0},
+       {FSL_SAI_RMR, 0},
++      {FSL_SAI_MCTL, 0},
++      {FSL_SAI_MDIV, 0},
+ };
+ static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
+@@ -850,6 +852,18 @@ static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
+       case FSL_SAI_RFR6:
+       case FSL_SAI_RFR7:
+       case FSL_SAI_RMR:
++      case FSL_SAI_MCTL:
++      case FSL_SAI_MDIV:
++      case FSL_SAI_VERID:
++      case FSL_SAI_PARAM:
++      case FSL_SAI_TTCTN:
++      case FSL_SAI_RTCTN:
++      case FSL_SAI_TTCTL:
++      case FSL_SAI_TBCTN:
++      case FSL_SAI_TTCAP:
++      case FSL_SAI_RTCTL:
++      case FSL_SAI_RBCTN:
++      case FSL_SAI_RTCAP:
+               return true;
+       default:
+               return false;
+@@ -864,6 +878,10 @@ static bool fsl_sai_volatile_reg(struct device *dev, unsigned int reg)
+       if (reg == FSL_SAI_TCSR(ofs) || reg == FSL_SAI_RCSR(ofs))
+               return true;
++      /* Set VERID and PARAM be volatile for reading value in probe */
++      if (ofs == 8 && (reg == FSL_SAI_VERID || reg == FSL_SAI_PARAM))
++              return true;
++
+       switch (reg) {
+       case FSL_SAI_TFR0:
+       case FSL_SAI_TFR1:
+@@ -917,6 +935,10 @@ static bool fsl_sai_writeable_reg(struct device *dev, unsigned int reg)
+       case FSL_SAI_TDR7:
+       case FSL_SAI_TMR:
+       case FSL_SAI_RMR:
++      case FSL_SAI_MCTL:
++      case FSL_SAI_MDIV:
++      case FSL_SAI_TTCTL:
++      case FSL_SAI_RTCTL:
+               return true;
+       default:
+               return false;
+@@ -965,6 +987,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+       if (sai->soc_data->reg_offset == 8) {
+               fsl_sai_regmap_config.reg_defaults = fsl_sai_reg_defaults_ofs8;
++              fsl_sai_regmap_config.max_register = FSL_SAI_MDIV;
+               fsl_sai_regmap_config.num_reg_defaults =
+                       ARRAY_SIZE(fsl_sai_reg_defaults_ofs8);
+       }
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index afaef20272342..156ee28077b76 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -14,6 +14,8 @@
+                        SNDRV_PCM_FMTBIT_S32_LE)
+ /* SAI Register Map Register */
++#define FSL_SAI_VERID 0x00 /* SAI Version ID Register */
++#define FSL_SAI_PARAM 0x04 /* SAI Parameter Register */
+ #define FSL_SAI_TCSR(ofs)     (0x00 + ofs) /* SAI Transmit Control */
+ #define FSL_SAI_TCR1(ofs)     (0x04 + ofs) /* SAI Transmit Configuration 1 */
+ #define FSL_SAI_TCR2(ofs)     (0x08 + ofs) /* SAI Transmit Configuration 2 */
+@@ -37,6 +39,10 @@
+ #define FSL_SAI_TFR6  0x58 /* SAI Transmit FIFO 6 */
+ #define FSL_SAI_TFR7  0x5C /* SAI Transmit FIFO 7 */
+ #define FSL_SAI_TMR   0x60 /* SAI Transmit Mask */
++#define FSL_SAI_TTCTL 0x70 /* SAI Transmit Timestamp Control Register */
++#define FSL_SAI_TTCTN 0x74 /* SAI Transmit Timestamp Counter Register */
++#define FSL_SAI_TBCTN 0x78 /* SAI Transmit Bit Counter Register */
++#define FSL_SAI_TTCAP 0x7C /* SAI Transmit Timestamp Capture */
+ #define FSL_SAI_RCSR(ofs)     (0x80 + ofs) /* SAI Receive Control */
+ #define FSL_SAI_RCR1(ofs)     (0x84 + ofs)/* SAI Receive Configuration 1 */
+ #define FSL_SAI_RCR2(ofs)     (0x88 + ofs) /* SAI Receive Configuration 2 */
+@@ -60,6 +66,13 @@
+ #define FSL_SAI_RFR6  0xd8 /* SAI Receive FIFO 6 */
+ #define FSL_SAI_RFR7  0xdc /* SAI Receive FIFO 7 */
+ #define FSL_SAI_RMR   0xe0 /* SAI Receive Mask */
++#define FSL_SAI_RTCTL 0xf0 /* SAI Receive Timestamp Control Register */
++#define FSL_SAI_RTCTN 0xf4 /* SAI Receive Timestamp Counter Register */
++#define FSL_SAI_RBCTN 0xf8 /* SAI Receive Bit Counter Register */
++#define FSL_SAI_RTCAP 0xfc /* SAI Receive Timestamp Capture */
++
++#define FSL_SAI_MCTL  0x100 /* SAI MCLK Control Register */
++#define FSL_SAI_MDIV  0x104 /* SAI MCLK Divide Register */
+ #define FSL_SAI_xCSR(tx, ofs) (tx ? FSL_SAI_TCSR(ofs) : FSL_SAI_RCSR(ofs))
+ #define FSL_SAI_xCR1(tx, ofs) (tx ? FSL_SAI_TCR1(ofs) : FSL_SAI_RCR1(ofs))
+@@ -73,6 +86,7 @@
+ /* SAI Transmit/Receive Control Register */
+ #define FSL_SAI_CSR_TERE      BIT(31)
++#define FSL_SAI_CSR_SE                BIT(30)
+ #define FSL_SAI_CSR_FR                BIT(25)
+ #define FSL_SAI_CSR_SR                BIT(24)
+ #define FSL_SAI_CSR_xF_SHIFT  16
+@@ -106,6 +120,7 @@
+ #define FSL_SAI_CR2_MSEL(ID)  ((ID) << 26)
+ #define FSL_SAI_CR2_BCP               BIT(25)
+ #define FSL_SAI_CR2_BCD_MSTR  BIT(24)
++#define FSL_SAI_CR2_BYP               BIT(23) /* BCLK bypass */
+ #define FSL_SAI_CR2_DIV_MASK  0xff
+ /* SAI Transmit and Receive Configuration 3 Register */
+@@ -115,6 +130,13 @@
+ #define FSL_SAI_CR3_WDFL_MASK 0x1f
+ /* SAI Transmit and Receive Configuration 4 Register */
++
++#define FSL_SAI_CR4_FCONT     BIT(28)
++#define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
++#define FSL_SAI_CR4_FCOMB_SOFT  BIT(27)
++#define FSL_SAI_CR4_FCOMB_MASK  (0x3 << 26)
++#define FSL_SAI_CR4_FPACK_8     (0x2 << 24)
++#define FSL_SAI_CR4_FPACK_16    (0x3 << 24)
+ #define FSL_SAI_CR4_FRSZ(x)   (((x) - 1) << 16)
+ #define FSL_SAI_CR4_FRSZ_MASK (0x1f << 16)
+ #define FSL_SAI_CR4_SYWD(x)   (((x) - 1) << 8)
+@@ -132,6 +154,43 @@
+ #define FSL_SAI_CR5_FBT(x)    ((x) << 8)
+ #define FSL_SAI_CR5_FBT_MASK  (0x1f << 8)
++/* SAI MCLK Control Register */
++#define FSL_SAI_MCTL_MCLK_EN  BIT(30) /* MCLK Enable */
++#define FSL_SAI_MCTL_MSEL_MASK        (0x3 << 24)
++#define FSL_SAI_MCTL_MSEL(ID)   ((ID) << 24)
++#define FSL_SAI_MCTL_MSEL_BUS 0
++#define FSL_SAI_MCTL_MSEL_MCLK1       BIT(24)
++#define FSL_SAI_MCTL_MSEL_MCLK2       BIT(25)
++#define FSL_SAI_MCTL_MSEL_MCLK3       (BIT(24) | BIT(25))
++#define FSL_SAI_MCTL_DIV_EN   BIT(23)
++#define FSL_SAI_MCTL_DIV_MASK 0xFF
++
++/* SAI VERID Register */
++#define FSL_SAI_VERID_MAJOR_SHIFT   24
++#define FSL_SAI_VERID_MAJOR_MASK    GENMASK(31, 24)
++#define FSL_SAI_VERID_MINOR_SHIFT   16
++#define FSL_SAI_VERID_MINOR_MASK    GENMASK(23, 16)
++#define FSL_SAI_VERID_FEATURE_SHIFT 0
++#define FSL_SAI_VERID_FEATURE_MASK  GENMASK(15, 0)
++#define FSL_SAI_VERID_EFIFO_EN            BIT(0)
++#define FSL_SAI_VERID_TSTMP_EN            BIT(1)
++
++/* SAI PARAM Register */
++#define FSL_SAI_PARAM_SPF_SHIFT           16
++#define FSL_SAI_PARAM_SPF_MASK            GENMASK(19, 16)
++#define FSL_SAI_PARAM_WPF_SHIFT           8
++#define FSL_SAI_PARAM_WPF_MASK            GENMASK(11, 8)
++#define FSL_SAI_PARAM_DLN_MASK            GENMASK(3, 0)
++
++/* SAI MCLK Divide Register */
++#define FSL_SAI_MDIV_MASK         0xFFFFF
++
++/* SAI timestamp and bitcounter */
++#define FSL_SAI_xTCTL_TSEN         BIT(0)
++#define FSL_SAI_xTCTL_TSINC        BIT(1)
++#define FSL_SAI_xTCTL_RTSC         BIT(8)
++#define FSL_SAI_xTCTL_RBC          BIT(9)
++
+ /* SAI type */
+ #define FSL_SAI_DMA           BIT(0)
+ #define FSL_SAI_USE_AC97      BIT(1)
+-- 
+2.40.1
+
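The VERID and PARAM registers added above are identification registers; the
new masks make it possible to discover the SAI version and FIFO geometry at
probe time.  A hypothetical decode helper using those masks is sketched
below -- regmap_read() and dev_dbg() are standard kernel APIs, but the helper
itself and its field labels are illustrative assumptions, not part of this
patch (the driver only started consuming these registers in later upstream
commits).

/* hypothetical probe-time dump of the new VERID/PARAM fields */
static void fsl_sai_dump_version(struct fsl_sai *sai, struct device *dev)
{
        unsigned int verid = 0, param = 0;

        /* error handling omitted for brevity */
        regmap_read(sai->regmap, FSL_SAI_VERID, &verid);
        regmap_read(sai->regmap, FSL_SAI_PARAM, &param);

        dev_dbg(dev, "VERID: major %lu minor %lu features 0x%lx\n",
                (verid & FSL_SAI_VERID_MAJOR_MASK) >> FSL_SAI_VERID_MAJOR_SHIFT,
                (verid & FSL_SAI_VERID_MINOR_MASK) >> FSL_SAI_VERID_MINOR_SHIFT,
                verid & FSL_SAI_VERID_FEATURE_MASK);
        dev_dbg(dev, "PARAM: spf %lu wpf %lu dln %lu\n",
                (param & FSL_SAI_PARAM_SPF_MASK) >> FSL_SAI_PARAM_SPF_SHIFT,
                (param & FSL_SAI_PARAM_WPF_MASK) >> FSL_SAI_PARAM_WPF_SHIFT,
                param & FSL_SAI_PARAM_DLN_MASK);
}

Note how the patch also marks VERID and PARAM volatile in
fsl_sai_volatile_reg() for the ofs == 8 layout, so such reads bypass the
regmap cache and return the hardware values.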
diff --git a/queue-5.4/asoc-fsl_sai-disable-bit-clock-with-transmitter.patch b/queue-5.4/asoc-fsl_sai-disable-bit-clock-with-transmitter.patch
new file mode 100644 (file)
index 0000000..8530fca
--- /dev/null
@@ -0,0 +1,50 @@
+From da89b89851ba4f137c4e921905c368a4b4c88d82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jul 2023 14:49:33 +0200
+Subject: ASoC: fsl_sai: Disable bit clock with transmitter
+
+From: Matus Gajdos <matuszpd@gmail.com>
+
+[ Upstream commit 269f399dc19f0e5c51711c3ba3bd06e0ef6ef403 ]
+
+Otherwise the bit clock remains running, writing invalid data to the DAC.
+
+Signed-off-by: Matus Gajdos <matuszpd@gmail.com>
+Acked-by: Shengjiu Wang <shengjiu.wang@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230712124934.32232-1-matuszpd@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_sai.c | 2 +-
+ sound/soc/fsl/fsl_sai.h | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index ebca0778d3f57..fdbfaedda4ce8 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -548,7 +548,7 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+       u32 xcsr, count = 100;
+       regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+-                         FSL_SAI_CSR_TERE, 0);
++                         FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
+       /* TERE will remain set till the end of current frame */
+       do {
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 156ee28077b76..771990396804c 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -87,6 +87,7 @@
+ /* SAI Transmit/Receive Control Register */
+ #define FSL_SAI_CSR_TERE      BIT(31)
+ #define FSL_SAI_CSR_SE                BIT(30)
++#define FSL_SAI_CSR_BCE               BIT(28)
+ #define FSL_SAI_CSR_FR                BIT(25)
+ #define FSL_SAI_CSR_SR                BIT(24)
+ #define FSL_SAI_CSR_xF_SHIFT  16
+-- 
+2.40.1
+
diff --git a/queue-5.4/asoc-fsl_sai-refine-enable-disable-te-re-sequence-in.patch b/queue-5.4/asoc-fsl_sai-refine-enable-disable-te-re-sequence-in.patch
new file mode 100644 (file)
index 0000000..4356edb
--- /dev/null
@@ -0,0 +1,208 @@
+From a5b1534b9ca33e58a24e3d18f4abc6b1eaf7f0dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Aug 2020 14:34:11 +0800
+Subject: ASoC: fsl_sai: Refine enable/disable TE/RE sequence in trigger()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shengjiu Wang <shengjiu.wang@nxp.com>
+
+[ Upstream commit 94741eba63c23b0f1527b0ae0125e6b553bde10e ]
+
+The current code enables TCSR.TE and RCSR.RE together, and disables
+TCSR.TE and RCSR.RE together in trigger(), which only supports
+one operation mode:
+1. Rx synchronous with Tx: TE is last enabled and first disabled
+
+Other operation modes also need to be considered:
+2. Tx synchronous with Rx: RE is last enabled and first disabled.
+3. Asynchronous mode: Tx and Rx are independent.
+
+So the enable TCSR.TE and RCSR.RE sequence and the disable
+sequence need to be refined accordingly for #2 and #3.
+
+This change goes slightly against what the RM recommends.
+For example, in Rx-synchronous-with-Tx mode, the case "aplay 1.wav;
+arecord 2.wav" enables TE before RE. But it should be safe to
+do so, judging by years of testing results.
+
+Signed-off-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Reviewed-by: Nicolin Chen <nicoleotsuka@gmail.com>
+Link: https://lore.kernel.org/r/20200805063413.4610-2-shengjiu.wang@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 269f399dc19f ("ASoC: fsl_sai: Disable bit clock with transmitter")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_sai.c | 126 +++++++++++++++++++++++++++-------------
+ 1 file changed, 85 insertions(+), 41 deletions(-)
+
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index f8445231ad782..23f0b5ee000c3 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -37,6 +37,24 @@ static const struct snd_pcm_hw_constraint_list fsl_sai_rate_constraints = {
+       .list = fsl_sai_rates,
+ };
++/**
++ * fsl_sai_dir_is_synced - Check if stream is synced by the opposite stream
++ *
++ * SAI supports synchronous mode using bit/frame clocks of either Transmitter's
++ * or Receiver's for both streams. This function is used to check if clocks of
++ * the stream's are synced by the opposite stream.
++ *
++ * @sai: SAI context
++ * @dir: stream direction
++ */
++static inline bool fsl_sai_dir_is_synced(struct fsl_sai *sai, int dir)
++{
++      int adir = (dir == TX) ? RX : TX;
++
++      /* current dir in async mode while opposite dir in sync mode */
++      return !sai->synchronous[dir] && sai->synchronous[adir];
++}
++
+ static irqreturn_t fsl_sai_isr(int irq, void *devid)
+ {
+       struct fsl_sai *sai = (struct fsl_sai *)devid;
+@@ -523,6 +541,38 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
+       return 0;
+ }
++static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
++{
++      unsigned int ofs = sai->soc_data->reg_offset;
++      bool tx = dir == TX;
++      u32 xcsr, count = 100;
++
++      regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
++                         FSL_SAI_CSR_TERE, 0);
++
++      /* TERE will remain set till the end of current frame */
++      do {
++              udelay(10);
++              regmap_read(sai->regmap, FSL_SAI_xCSR(tx, ofs), &xcsr);
++      } while (--count && xcsr & FSL_SAI_CSR_TERE);
++
++      regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
++                         FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
++
++      /*
++       * For sai master mode, after several open/close sai,
++       * there will be no frame clock, and can't recover
++       * anymore. Add software reset to fix this issue.
++       * This is a hardware bug, and will be fix in the
++       * next sai version.
++       */
++      if (!sai->is_slave_mode) {
++              /* Software Reset */
++              regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
++              /* Clear SR bit to finish the reset */
++              regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
++      }
++}
+ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+               struct snd_soc_dai *cpu_dai)
+@@ -531,7 +581,9 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+       unsigned int ofs = sai->soc_data->reg_offset;
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+-      u32 xcsr, count = 100;
++      int adir = tx ? RX : TX;
++      int dir = tx ? TX : RX;
++      u32 xcsr;
+       /*
+        * Asynchronous mode: Clear SYNC for both Tx and Rx.
+@@ -554,10 +606,22 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+               regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+                                  FSL_SAI_CSR_FRDE, FSL_SAI_CSR_FRDE);
+-              regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+-                                 FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
+-              regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
++              regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+                                  FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
++              /*
++               * Enable the opposite direction for synchronous mode
++               * 1. Tx sync with Rx: only set RE for Rx; set TE & RE for Tx
++               * 2. Rx sync with Tx: only set TE for Tx; set RE & TE for Rx
++               *
++               * RM recommends to enable RE after TE for case 1 and to enable
++               * TE after RE for case 2, but we here may not always guarantee
++               * that happens: "arecord 1.wav; aplay 2.wav" in case 1 enables
++               * TE after RE, which is against what RM recommends but should
++               * be safe to do, judging by years of testing results.
++               */
++              if (fsl_sai_dir_is_synced(sai, adir))
++                      regmap_update_bits(sai->regmap, FSL_SAI_xCSR((!tx), ofs),
++                                         FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
+               regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+                                  FSL_SAI_CSR_xIE_MASK, FSL_SAI_FLAGS);
+@@ -572,43 +636,23 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+               /* Check if the opposite FRDE is also disabled */
+               regmap_read(sai->regmap, FSL_SAI_xCSR(!tx, ofs), &xcsr);
+-              if (!(xcsr & FSL_SAI_CSR_FRDE)) {
+-                      /* Disable both directions and reset their FIFOs */
+-                      regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
+-                                         FSL_SAI_CSR_TERE, 0);
+-                      regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+-                                         FSL_SAI_CSR_TERE, 0);
+-
+-                      /* TERE will remain set till the end of current frame */
+-                      do {
+-                              udelay(10);
+-                              regmap_read(sai->regmap,
+-                                          FSL_SAI_xCSR(tx, ofs), &xcsr);
+-                      } while (--count && xcsr & FSL_SAI_CSR_TERE);
+-
+-                      regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
+-                                         FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+-                      regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+-                                         FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+-
+-                      /*
+-                       * For sai master mode, after several open/close sai,
+-                       * there will be no frame clock, and can't recover
+-                       * anymore. Add software reset to fix this issue.
+-                       * This is a hardware bug, and will be fix in the
+-                       * next sai version.
+-                       */
+-                      if (!sai->is_slave_mode) {
+-                              /* Software Reset for both Tx and Rx */
+-                              regmap_write(sai->regmap, FSL_SAI_TCSR(ofs),
+-                                           FSL_SAI_CSR_SR);
+-                              regmap_write(sai->regmap, FSL_SAI_RCSR(ofs),
+-                                           FSL_SAI_CSR_SR);
+-                              /* Clear SR bit to finish the reset */
+-                              regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0);
+-                              regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
+-                      }
+-              }
++
++              /*
++               * If opposite stream provides clocks for synchronous mode and
++               * it is inactive, disable it before disabling the current one
++               */
++              if (fsl_sai_dir_is_synced(sai, adir) && !(xcsr & FSL_SAI_CSR_FRDE))
++                      fsl_sai_config_disable(sai, adir);
++
++              /*
++               * Disable current stream if either of:
++               * 1. current stream doesn't provide clocks for synchronous mode
++               * 2. current stream provides clocks for synchronous mode but no
++               *    more stream is active.
++               */
++              if (!fsl_sai_dir_is_synced(sai, dir) || !(xcsr & FSL_SAI_CSR_FRDE))
++                      fsl_sai_config_disable(sai, dir);
++
+               break;
+       default:
+               return -EINVAL;
+-- 
+2.40.1
+
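The key primitive introduced above is fsl_sai_dir_is_synced(), which decides
whether the opposite stream depends on the current one for its clocks.  The
self-contained mock below just exercises that rule for the three operation
modes listed in the commit message; the TX/RX values and the mock structure
are assumptions for this example, not the driver's definitions.

/* illustration of the fsl_sai_dir_is_synced() rule for the three modes */
#include <stdbool.h>
#include <stdio.h>

enum { TX = 0, RX = 1 };

struct mock_sai {
        bool synchronous[2];    /* [dir] == true: dir follows the other side */
};

/* dir is "synced" when it runs asynchronously itself while the opposite
 * direction is configured to follow its clocks */
static bool dir_is_synced(const struct mock_sai *sai, int dir)
{
        int adir = (dir == TX) ? RX : TX;

        return !sai->synchronous[dir] && sai->synchronous[adir];
}

int main(void)
{
        /* 1. Rx synchronous with Tx: Tx provides the clocks */
        struct mock_sai m1 = { .synchronous = { [TX] = false, [RX] = true } };
        /* 2. Tx synchronous with Rx: Rx provides the clocks */
        struct mock_sai m2 = { .synchronous = { [TX] = true, [RX] = false } };
        /* 3. Asynchronous: each direction owns its clocks */
        struct mock_sai m3 = { .synchronous = { false, false } };

        printf("mode 1: TX %d RX %d\n", dir_is_synced(&m1, TX), dir_is_synced(&m1, RX));
        printf("mode 2: TX %d RX %d\n", dir_is_synced(&m2, TX), dir_is_synced(&m2, RX));
        printf("mode 3: TX %d RX %d\n", dir_is_synced(&m3, TX), dir_is_synced(&m3, RX));
        return 0;
}

In trigger(), the clock-providing direction reported by this check is enabled
alongside the current stream on start, and on stop it is kept enabled as long
as another stream still needs its clocks.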
diff --git a/queue-5.4/bonding-fix-macvlan-over-alb-bond-support.patch b/queue-5.4/bonding-fix-macvlan-over-alb-bond-support.patch
new file mode 100644 (file)
index 0000000..7f3657a
--- /dev/null
@@ -0,0 +1,90 @@
+From 19d9a8e22a17062c7e2cfbfca045977a05b50c6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Aug 2023 15:19:04 +0800
+Subject: bonding: fix macvlan over alb bond support
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit e74216b8def3803e98ae536de78733e9d7f3b109 ]
+
+The commit 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode
+bonds") aims to enable the use of macvlans on top of rlb bond mode. However,
+the current rlb bond mode only handles ARP packets to update remote neighbor
+entries. This causes an issue when a macvlan is on top of the bond, and
+remote devices send packets to the macvlan using the bond's MAC address
+as the destination. After delivering the packets to the macvlan, the macvlan
+will reject them as the MAC address is incorrect. Consequently, that commit
+makes macvlan over bond non-functional.
+
+To address this problem, one potential solution is to check for the presence
+of a macvlan port on the bond device using netif_is_macvlan_port(bond->dev)
+and return NULL in the rlb_arp_xmit() function. However, this approach
+doesn't fully resolve the situation when a VLAN exists between the bond and
+macvlan.
+
+So let's just do a partial revert for commit 14af9963ba1e in rlb_arp_xmit().
+As the comment said, Don't modify or load balance ARPs that do not originate
+locally.
+
+Fixes: 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds")
+Reported-by: susan.zheng@veritas.com
+Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2117816
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Acked-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_alb.c |  6 +++---
+ include/net/bonding.h          | 11 +----------
+ 2 files changed, 4 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 20114e1dde77e..6df78a36bafde 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+               return NULL;
+       arp = (struct arp_pkt *)skb_network_header(skb);
+-      /* Don't modify or load balance ARPs that do not originate locally
+-       * (e.g.,arrive via a bridge).
++      /* Don't modify or load balance ARPs that do not originate
++       * from the bond itself or a VLAN directly above the bond.
+        */
+-      if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++      if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+               return NULL;
+       if (arp->op_code == htons(ARPOP_REPLY)) {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 4e1e589aae057..9e9ccbade3b54 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -686,23 +686,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+ /* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+       struct list_head *iter;
+       struct slave *tmp;
+-      struct netdev_hw_addr *ha;
+       bond_for_each_slave_rcu(bond, tmp, iter)
+               if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+                       return true;
+-
+-      if (netdev_uc_empty(bond->dev))
+-              return false;
+-
+-      netdev_for_each_uc_addr(ha, bond->dev)
+-              if (ether_addr_equal_64bits(mac, ha->addr))
+-                      return true;
+-
+       return false;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.4/dccp-annotate-data-races-in-dccp_poll.patch b/queue-5.4/dccp-annotate-data-races-in-dccp_poll.patch
new file mode 100644 (file)
index 0000000..a555b4c
--- /dev/null
@@ -0,0 +1,82 @@
+From 9d08ddc726326167eb5c188a78da8e979d782524 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:58:20 +0000
+Subject: dccp: annotate data-races in dccp_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cba3f1786916063261e3e5ccbb803abc325b24ef ]
+
+We changed tcp_poll() over time, but never updated dccp.
+
+Note that we also could remove dccp instead of maintaining it.
+
+Fixes: 7c657876b63c ("[DCCP]: Initial implementation")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230818015820.2701595-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/dccp/proto.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index cd868556452ec..491b148afa8f0 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
+ {
+-      __poll_t mask;
+       struct sock *sk = sock->sk;
++      __poll_t mask;
++      u8 shutdown;
++      int state;
+       sock_poll_wait(file, sock, wait);
+-      if (sk->sk_state == DCCP_LISTEN)
++
++      state = inet_sk_state_load(sk);
++      if (state == DCCP_LISTEN)
+               return inet_csk_listen_poll(sk);
+       /* Socket is not locked. We are protected from async events
+@@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+        */
+       mask = 0;
+-      if (sk->sk_err)
++      if (READ_ONCE(sk->sk_err))
+               mask = EPOLLERR;
++      shutdown = READ_ONCE(sk->sk_shutdown);
+-      if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++      if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+               mask |= EPOLLHUP;
+-      if (sk->sk_shutdown & RCV_SHUTDOWN)
++      if (shutdown & RCV_SHUTDOWN)
+               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+       /* Connected? */
+-      if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++      if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+               if (atomic_read(&sk->sk_rmem_alloc) > 0)
+                       mask |= EPOLLIN | EPOLLRDNORM;
+-              if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++              if (!(shutdown & SEND_SHUTDOWN)) {
+                       if (sk_stream_is_writeable(sk)) {
+                               mask |= EPOLLOUT | EPOLLWRNORM;
+                       } else {  /* send SIGIO later */
+@@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+       }
+       return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+-- 
+2.40.1
+
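The annotations above follow the usual pattern for lockless readers:
dccp_poll() runs without the socket lock, so each shared field is read exactly
once with READ_ONCE() and all decisions are made on that snapshot, paired with
WRITE_ONCE() on the writer side.  The self-contained sketch below shows the
generic pairing; the mock socket, the simplified mask bits and the macro
definitions are illustrative assumptions, not the dccp code.

/* generic READ_ONCE/WRITE_ONCE pairing for a lockless reader */
#include <stdio.h>

#define READ_ONCE(x)            (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)      (*(volatile typeof(x) *)&(x) = (val))

#define RCV_SHUTDOWN    1
#define SEND_SHUTDOWN   2
#define SHUTDOWN_MASK   3

struct mock_sock {
        int sk_err;
        unsigned char sk_shutdown;
};

/* writer side (error/shutdown paths), paired with the reader below */
static void mock_report_error(struct mock_sock *sk, int err)
{
        WRITE_ONCE(sk->sk_err, err);
}

/* lockless reader: one READ_ONCE() per field, then use the local copy */
static unsigned int mock_poll(struct mock_sock *sk)
{
        unsigned char shutdown = READ_ONCE(sk->sk_shutdown);
        unsigned int mask = 0;

        if (READ_ONCE(sk->sk_err))
                mask |= 0x01;                   /* "EPOLLERR" */
        if (shutdown == SHUTDOWN_MASK)
                mask |= 0x10;                   /* "EPOLLHUP" */
        if (shutdown & RCV_SHUTDOWN)
                mask |= 0x02;                   /* "EPOLLIN" */
        return mask;
}

int main(void)
{
        struct mock_sock sk = { 0, 0 };

        mock_report_error(&sk, 104);
        WRITE_ONCE(sk.sk_shutdown, RCV_SHUTDOWN);
        printf("poll mask: 0x%x\n", mock_poll(&sk));
        return 0;
}

Reading sk_shutdown into a local first also keeps the EPOLLHUP and
RCV_SHUTDOWN tests consistent with each other, which is the point of the
shutdown variable introduced by the patch.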
diff --git a/queue-5.4/dlm-improve-plock-logging-if-interrupted.patch b/queue-5.4/dlm-improve-plock-logging-if-interrupted.patch
new file mode 100644 (file)
index 0000000..0e68c46
--- /dev/null
@@ -0,0 +1,65 @@
+From 2b461676c7445a4d85bbf736aebf994a8945bcbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:34 -0400
+Subject: dlm: improve plock logging if interrupted
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit bcfad4265cedf3adcac355e994ef9771b78407bd ]
+
+This patch changes the log level from debug to info if a plock is
+removed when interrupted. Additionally, it now signals that the plock
+entity was removed, to let the user know what's happening.
+
+If on a dev_write() a pending plock cannot be found, it will signal
+that it might have been removed because of a wait interruption.
+
+Before this patch there might be a "dev_write no op ..." info message,
+and users could only guess that the plock was removed earlier because
+of a wait interruption. To be sure that is the case, we log both
+messages at the same log level.
+
+Let both messages be logged at the info level because this should not
+happen often, and if it does it should be clear why the op was not found.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f3482e936cc25..f74d5a28ad27c 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -161,11 +161,12 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+       rv = wait_event_killable(recv_wq, (op->done != 0));
+       if (rv == -ERESTARTSYS) {
+-              log_debug(ls, "%s: wait killed %llx", __func__,
+-                        (unsigned long long)number);
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
++              log_print("%s: wait interrupted %x %llx, op removed",
++                        __func__, ls->ls_global_id,
++                        (unsigned long long)number);
+               dlm_release_plock_op(op);
+               do_unlock_close(ls, number, file, fl);
+               goto out;
+@@ -469,8 +470,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               else
+                       wake_up(&recv_wq);
+       } else
+-              log_print("dev_write no op %x %llx", info.fsid,
+-                        (unsigned long long)info.number);
++              log_print("%s: no op %x %llx - may got interrupted?", __func__,
++                        info.fsid, (unsigned long long)info.number);
+       return count;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.4/dlm-replace-usage-of-found-with-dedicated-list-itera.patch b/queue-5.4/dlm-replace-usage-of-found-with-dedicated-list-itera.patch
new file mode 100644 (file)
index 0000000..2c3bcd8
--- /dev/null
@@ -0,0 +1,300 @@
+From e9468ac74dffd946bb95740d333b5dff1678c8b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Apr 2022 14:05:31 -0400
+Subject: dlm: replace usage of found with dedicated list iterator variable
+
+From: Jakob Koschel <jakobkoschel@gmail.com>
+
+[ Upstream commit dc1acd5c94699389a9ed023e94dd860c846ea1f6 ]
+
+To move the list iterator variable into the list_for_each_entry_*()
+macro in the future, the use of the list iterator variable after the
+loop body should be avoided.
+
+To *never* use the list iterator variable after the loop, it was
+concluded that a separate iterator variable should be used instead of
+a found boolean [1].
+
+This removes the need for a found variable; simply checking whether
+the iterator variable was set determines if the break/goto was hit.
+
+Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1]
+Signed-off-by: Jakob Koschel <jakobkoschel@gmail.com>
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c    | 53 +++++++++++++++++++++++-------------------------
+ fs/dlm/plock.c   | 24 +++++++++++-----------
+ fs/dlm/recover.c | 39 +++++++++++++++++------------------
+ 3 files changed, 56 insertions(+), 60 deletions(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index b6242071583e0..86d645d02d55c 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
+ void dlm_scan_timeout(struct dlm_ls *ls)
+ {
+       struct dlm_rsb *r;
+-      struct dlm_lkb *lkb;
++      struct dlm_lkb *lkb = NULL, *iter;
+       int do_cancel, do_warn;
+       s64 wait_us;
+@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
+               do_cancel = 0;
+               do_warn = 0;
+               mutex_lock(&ls->ls_timeout_mutex);
+-              list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
++              list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
+                       wait_us = ktime_to_us(ktime_sub(ktime_get(),
+-                                                      lkb->lkb_timestamp));
++                                                      iter->lkb_timestamp));
+-                      if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
+-                          wait_us >= (lkb->lkb_timeout_cs * 10000))
++                      if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
++                          wait_us >= (iter->lkb_timeout_cs * 10000))
+                               do_cancel = 1;
+-                      if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
++                      if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+                           wait_us >= dlm_config.ci_timewarn_cs * 10000)
+                               do_warn = 1;
+                       if (!do_cancel && !do_warn)
+                               continue;
+-                      hold_lkb(lkb);
++                      hold_lkb(iter);
++                      lkb = iter;
+                       break;
+               }
+               mutex_unlock(&ls->ls_timeout_mutex);
+-              if (!do_cancel && !do_warn)
++              if (!lkb)
+                       break;
+               r = lkb->lkb_resource;
+@@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
+ {
+-      struct dlm_lkb *lkb;
+-      int found = 0;
++      struct dlm_lkb *lkb = NULL, *iter;
+       mutex_lock(&ls->ls_waiters_mutex);
+-      list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+-              if (lkb->lkb_flags & DLM_IFL_RESEND) {
+-                      hold_lkb(lkb);
+-                      found = 1;
++      list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
++              if (iter->lkb_flags & DLM_IFL_RESEND) {
++                      hold_lkb(iter);
++                      lkb = iter;
+                       break;
+               }
+       }
+       mutex_unlock(&ls->ls_waiters_mutex);
+-      if (!found)
+-              lkb = NULL;
+       return lkb;
+ }
+@@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+                    int mode, uint32_t flags, void *name, unsigned int namelen,
+                    unsigned long timeout_cs, uint32_t *lkid)
+ {
+-      struct dlm_lkb *lkb;
++      struct dlm_lkb *lkb = NULL, *iter;
+       struct dlm_user_args *ua;
+       int found_other_mode = 0;
+-      int found = 0;
+       int rv = 0;
+       mutex_lock(&ls->ls_orphans_mutex);
+-      list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
+-              if (lkb->lkb_resource->res_length != namelen)
++      list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
++              if (iter->lkb_resource->res_length != namelen)
+                       continue;
+-              if (memcmp(lkb->lkb_resource->res_name, name, namelen))
++              if (memcmp(iter->lkb_resource->res_name, name, namelen))
+                       continue;
+-              if (lkb->lkb_grmode != mode) {
++              if (iter->lkb_grmode != mode) {
+                       found_other_mode = 1;
+                       continue;
+               }
+-              found = 1;
+-              list_del_init(&lkb->lkb_ownqueue);
+-              lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
+-              *lkid = lkb->lkb_id;
++              lkb = iter;
++              list_del_init(&iter->lkb_ownqueue);
++              iter->lkb_flags &= ~DLM_IFL_ORPHAN;
++              *lkid = iter->lkb_id;
+               break;
+       }
+       mutex_unlock(&ls->ls_orphans_mutex);
+-      if (!found && found_other_mode) {
++      if (!lkb && found_other_mode) {
+               rv = -EAGAIN;
+               goto out;
+       }
+-      if (!found) {
++      if (!lkb) {
+               rv = -ENOENT;
+               goto out;
+       }
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f74d5a28ad27c..95f4662c1209a 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -434,9 +434,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+                        loff_t *ppos)
+ {
++      struct plock_op *op = NULL, *iter;
+       struct dlm_plock_info info;
+-      struct plock_op *op;
+-      int found = 0, do_callback = 0;
++      int do_callback = 0;
+       if (count != sizeof(info))
+               return -EINVAL;
+@@ -448,23 +448,23 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               return -EINVAL;
+       spin_lock(&ops_lock);
+-      list_for_each_entry(op, &recv_list, list) {
+-              if (op->info.fsid == info.fsid &&
+-                  op->info.number == info.number &&
+-                  op->info.owner == info.owner) {
+-                      list_del_init(&op->list);
+-                      memcpy(&op->info, &info, sizeof(info));
+-                      if (op->data)
++      list_for_each_entry(iter, &recv_list, list) {
++              if (iter->info.fsid == info.fsid &&
++                  iter->info.number == info.number &&
++                  iter->info.owner == info.owner) {
++                      list_del_init(&iter->list);
++                      memcpy(&iter->info, &info, sizeof(info));
++                      if (iter->data)
+                               do_callback = 1;
+                       else
+-                              op->done = 1;
+-                      found = 1;
++                              iter->done = 1;
++                      op = iter;
+                       break;
+               }
+       }
+       spin_unlock(&ops_lock);
+-      if (found) {
++      if (op) {
+               if (do_callback)
+                       dlm_plock_callback(op);
+               else
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index 8928e99dfd47d..df18f38a02734 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
+ static void recover_lvb(struct dlm_rsb *r)
+ {
+-      struct dlm_lkb *lkb, *high_lkb = NULL;
++      struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
+       uint32_t high_seq = 0;
+       int lock_lvb_exists = 0;
+-      int big_lock_exists = 0;
+       int lvblen = r->res_ls->ls_lvblen;
+       if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
+       /* we are the new master, so figure out if VALNOTVALID should
+          be set, and set the rsb lvb from the best lkb available. */
+-      list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+-              if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++      list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
++              if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+                       continue;
+               lock_lvb_exists = 1;
+-              if (lkb->lkb_grmode > DLM_LOCK_CR) {
+-                      big_lock_exists = 1;
++              if (iter->lkb_grmode > DLM_LOCK_CR) {
++                      big_lkb = iter;
+                       goto setflag;
+               }
+-              if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+-                      high_lkb = lkb;
+-                      high_seq = lkb->lkb_lvbseq;
++              if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++                      high_lkb = iter;
++                      high_seq = iter->lkb_lvbseq;
+               }
+       }
+-      list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+-              if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++      list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
++              if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+                       continue;
+               lock_lvb_exists = 1;
+-              if (lkb->lkb_grmode > DLM_LOCK_CR) {
+-                      big_lock_exists = 1;
++              if (iter->lkb_grmode > DLM_LOCK_CR) {
++                      big_lkb = iter;
+                       goto setflag;
+               }
+-              if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+-                      high_lkb = lkb;
+-                      high_seq = lkb->lkb_lvbseq;
++              if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++                      high_lkb = iter;
++                      high_seq = iter->lkb_lvbseq;
+               }
+       }
+@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
+               goto out;
+       /* lvb is invalidated if only NL/CR locks remain */
+-      if (!big_lock_exists)
++      if (!big_lkb)
+               rsb_set_flag(r, RSB_VALNOTVALID);
+       if (!r->res_lvbptr) {
+@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
+                       goto out;
+       }
+-      if (big_lock_exists) {
+-              r->res_lvbseq = lkb->lkb_lvbseq;
+-              memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
++      if (big_lkb) {
++              r->res_lvbseq = big_lkb->lkb_lvbseq;
++              memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
+       } else if (high_lkb) {
+               r->res_lvbseq = high_lkb->lkb_lvbseq;
+               memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
+-- 
+2.40.1
+
diff --git a/queue-5.4/dm-integrity-increase-recalc_sectors-to-improve-reca.patch b/queue-5.4/dm-integrity-increase-recalc_sectors-to-improve-reca.patch
new file mode 100644 (file)
index 0000000..6446400
--- /dev/null
@@ -0,0 +1,36 @@
+From e3089981378618bc4dd6cc6810b00e8a18d8cfb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Apr 2021 11:57:43 -0400
+Subject: dm integrity: increase RECALC_SECTORS to improve recalculate speed
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit b1a2b9332050c7ae32a22c2c74bc443e39f37b23 ]
+
+Increase RECALC_SECTORS because it improves recalculate speed slightly
+(from 390kiB/s to 410kiB/s).
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Stable-dep-of: 6d50eb472593 ("dm integrity: reduce vmalloc space footprint on 32-bit architectures")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index d7911c623edde..12b086b14cc4a 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -35,7 +35,7 @@
+ #define MIN_LOG2_INTERLEAVE_SECTORS   3
+ #define MAX_LOG2_INTERLEAVE_SECTORS   31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS                        8192
++#define RECALC_SECTORS                        32768
+ #define RECALC_WRITE_SUPER            16
+ #define BITMAP_BLOCK_SIZE             4096    /* don't change it */
+ #define BITMAP_FLUSH_INTERVAL         (10 * HZ)
+-- 
+2.40.1
+
diff --git a/queue-5.4/dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch b/queue-5.4/dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch
new file mode 100644 (file)
index 0000000..bf3b9e0
--- /dev/null
@@ -0,0 +1,47 @@
+From 5e5bd105aa2363efe0ed79f8f83777b8912cc4f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jun 2023 16:44:34 +0200
+Subject: dm integrity: reduce vmalloc space footprint on 32-bit architectures
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+[ Upstream commit 6d50eb4725934fd22f5eeccb401000687c790fd0 ]
+
+It was reported that dm-integrity runs out of vmalloc space on 32-bit
+architectures. On x86, there is only 128MiB vmalloc space and dm-integrity
+consumes it quickly because it has a 64MiB journal and 8MiB recalculate
+buffer.
+
+Fix this by reducing the size of the journal to 4MiB and the size of
+the recalculate buffer to 1MiB, so that multiple dm-integrity devices
+can be created and activated on 32-bit architectures.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-integrity.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 12b086b14cc4a..81157801a3dc6 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -31,11 +31,11 @@
+ #define DEFAULT_BUFFER_SECTORS                128
+ #define DEFAULT_JOURNAL_WATERMARK     50
+ #define DEFAULT_SYNC_MSEC             10000
+-#define DEFAULT_MAX_JOURNAL_SECTORS   131072
++#define DEFAULT_MAX_JOURNAL_SECTORS   (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
+ #define MIN_LOG2_INTERLEAVE_SECTORS   3
+ #define MAX_LOG2_INTERLEAVE_SECTORS   31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS                        32768
++#define RECALC_SECTORS                        (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
+ #define RECALC_WRITE_SUPER            16
+ #define BITMAP_BLOCK_SIZE             4096    /* don't change it */
+ #define BITMAP_FLUSH_INTERVAL         (10 * HZ)
+-- 
+2.40.1
+
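The sector counts above translate into the sizes quoted in the commit message
once multiplied by the 512-byte sector size (the same unit that makes 131072
sectors a 64MiB journal). A stand-alone arithmetic check, offered here only as
an illustrative sketch and not part of the queued patch:

  #include <stdio.h>

  int main(void)
  {
          /* 32-bit values from the hunk above, in 512-byte sectors */
          unsigned long journal_sectors = 8192;
          unsigned long recalc_sectors  = 2048;

          printf("journal: %lu MiB\n", journal_sectors * 512 >> 20); /* prints 4 */
          printf("recalc:  %lu MiB\n", recalc_sectors * 512 >> 20);  /* prints 1 */
          return 0;
  }
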
diff --git a/queue-5.4/drm-amd-display-check-tg-is-non-null-before-checking.patch b/queue-5.4/drm-amd-display-check-tg-is-non-null-before-checking.patch
new file mode 100644 (file)
index 0000000..3430cd7
--- /dev/null
@@ -0,0 +1,43 @@
+From 8c589100551d71e007a5502736a5b99c5dedd45c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 17:00:28 -0400
+Subject: drm/amd/display: check TG is non-null before checking if enabled
+
+From: Taimur Hassan <syed.hassan@amd.com>
+
+[ Upstream commit 5a25cefc0920088bb9afafeb80ad3dcd84fe278b ]
+
+[Why & How]
+If there is no TG allocation, we can dereference a NULL pointer when
+checking if the TG is enabled.
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Alan Liu <haoping.liu@amd.com>
+Signed-off-by: Taimur Hassan <syed.hassan@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 16b87af6f6628..c4c99bc7f2890 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2902,7 +2902,8 @@ static void dcn10_wait_for_mpcc_disconnect(
+               if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+                       struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+-                      if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++                      if (pipe_ctx->stream_res.tg &&
++                              pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+                               res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+                       pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+                       hubp->funcs->set_blank(hubp, true);
+-- 
+2.40.1
+
diff --git a/queue-5.4/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch b/queue-5.4/drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
new file mode 100644 (file)
index 0000000..7e7fcb1
--- /dev/null
@@ -0,0 +1,47 @@
+From f18de9e64ac55a84210ee6563e2bf27f9fe31412 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Mar 2022 12:08:43 -0400
+Subject: drm/amd/display: do not wait for mpc idle if tg is disabled
+
+From: Josip Pavic <Josip.Pavic@amd.com>
+
+[ Upstream commit 2513ed4f937999c0446fd824f7564f76b697d722 ]
+
+[Why]
+When booting, the driver waits for the MPC idle bit to be set as part of
+pipe initialization. However, on some systems this occurs before OTG is
+enabled, and since the MPC idle bit won't be set until the vupdate
+signal occurs (which requires OTG to be enabled), this never happens and
+the wait times out. This can add hundreds of milliseconds to the boot
+time.
+
+[How]
+Do not wait for mpc idle if tg is disabled
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Signed-off-by: Josip Pavic <Josip.Pavic@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 5a25cefc0920 ("drm/amd/display: check TG is non-null before checking if enabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index fa3acf60e7bd2..16b87af6f6628 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2902,7 +2902,8 @@ static void dcn10_wait_for_mpcc_disconnect(
+               if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+                       struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+-                      res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
++                      if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++                              res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+                       pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+                       hubp->funcs->set_blank(hubp, true);
+               }
+-- 
+2.40.1
+
diff --git a/queue-5.4/fbdev-fix-potential-oob-read-in-fast_imageblit.patch b/queue-5.4/fbdev-fix-potential-oob-read-in-fast_imageblit.patch
new file mode 100644 (file)
index 0000000..577ed2c
--- /dev/null
@@ -0,0 +1,45 @@
+From 67b037969ccb2a91211b435cbb1e9ae7eb512bdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Jun 2023 00:16:49 +0800
+Subject: fbdev: fix potential OOB read in fast_imageblit()
+
+From: Zhang Shurong <zhang_shurong@foxmail.com>
+
+[ Upstream commit c2d22806aecb24e2de55c30a06e5d6eb297d161d ]
+
+There is a potential OOB read in fast_imageblit(): the index used in
+"colortab[(*src >> 4)]" can become a negative value due to
+"const char *s = image->data, *src".
+This change makes sure the index into colortab is always positive
+or zero.
+
+Similar commit:
+https://patchwork.kernel.org/patch/11746067
+
+Potential bug report:
+https://groups.google.com/g/syzkaller-bugs/c/9ubBXKeKXf4/m/k-QXy4UgAAAJ
+
+Signed-off-by: Zhang Shurong <zhang_shurong@foxmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 335e92b813fc4..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+       u32 bit_mask, eorx, shift;
+-      const char *s = image->data, *src;
++      const u8 *s = image->data, *src;
+       u32 *dst;
+       const u32 *tab;
+       size_t tablen;
+-- 
+2.40.1
+
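The one-line type change above removes a sign-extension hazard: when image
data is read through a plain char pointer, a byte with the high bit set is
promoted to a negative int before the shift. A minimal user-space sketch of
that behaviour, offered only as an illustration and not part of the queued
patch (plain char is signed on most Linux ABIs, and the exact result of
right-shifting a negative value is implementation-defined):

  #include <stdio.h>

  int main(void)
  {
          const char          sc = (char)0x90;  /* signed char on most Linux ABIs */
          const unsigned char uc = 0x90;

          printf("signed char:   %d\n", sc >> 4);  /* typically -7 (sign-extended) */
          printf("unsigned char: %d\n", uc >> 4);  /* always 9 */
          return 0;
  }
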
diff --git a/queue-5.4/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch b/queue-5.4/fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
new file mode 100644 (file)
index 0000000..2ab2bb4
--- /dev/null
@@ -0,0 +1,103 @@
+From e2a5e7e57c2b91e744ab1964ee6ac439dd89448d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Mar 2022 20:29:51 +0100
+Subject: fbdev: Fix sys_imageblit() for arbitrary image widths
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 61bfcb6a3b981e8f19e044ac8c3de6edbe6caf70 ]
+
+Commit 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+broke sys_imageblit() for image widths that are not aligned to 8-bit
+boundaries. Fix this by handling the trailing pixels on each line
+separately. The performance improvements of the original commit are not
+regressed by this change.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 6f29e04938bf ("fbdev: Improve performance of sys_imageblit()")
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Javier Martinez Canillas <javierm@redhat.com>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220313192952.12058-2-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 29 ++++++++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 722c327a381bd..335e92b813fc4 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,7 +188,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+-      u32 bit_mask, eorx;
++      u32 bit_mask, eorx, shift;
+       const char *s = image->data, *src;
+       u32 *dst;
+       const u32 *tab;
+@@ -229,17 +229,23 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       for (i = image->height; i--; ) {
+               dst = dst1;
++              shift = 8;
+               src = s;
++              /*
++               * Manually unroll the per-line copying loop for better
++               * performance. This works until we processed the last
++               * completely filled source byte (inclusive).
++               */
+               switch (ppw) {
+               case 4: /* 8 bpp */
+-                      for (j = k; j; j -= 2, ++src) {
++                      for (j = k; j >= 2; j -= 2, ++src) {
+                               *dst++ = colortab[(*src >> 4) & bit_mask];
+                               *dst++ = colortab[(*src >> 0) & bit_mask];
+                       }
+                       break;
+               case 2: /* 16 bpp */
+-                      for (j = k; j; j -= 4, ++src) {
++                      for (j = k; j >= 4; j -= 4, ++src) {
+                               *dst++ = colortab[(*src >> 6) & bit_mask];
+                               *dst++ = colortab[(*src >> 4) & bit_mask];
+                               *dst++ = colortab[(*src >> 2) & bit_mask];
+@@ -247,7 +253,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+                       }
+                       break;
+               case 1: /* 32 bpp */
+-                      for (j = k; j; j -= 8, ++src) {
++                      for (j = k; j >= 8; j -= 8, ++src) {
+                               *dst++ = colortab[(*src >> 7) & bit_mask];
+                               *dst++ = colortab[(*src >> 6) & bit_mask];
+                               *dst++ = colortab[(*src >> 5) & bit_mask];
+@@ -259,6 +265,21 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+                       }
+                       break;
+               }
++
++              /*
++               * For image widths that are not a multiple of 8, there
++               * are trailing pixels left on the current line. Print
++               * them as well.
++               */
++              for (; j--; ) {
++                      shift -= ppw;
++                      *dst++ = colortab[(*src >> shift) & bit_mask];
++                      if (!shift) {
++                              shift = 8;
++                              ++src;
++                      }
++              }
++
+               dst1 += p->fix.line_length;
+               s += spitch;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.4/fbdev-improve-performance-of-sys_imageblit.patch b/queue-5.4/fbdev-improve-performance-of-sys_imageblit.patch
new file mode 100644 (file)
index 0000000..024f60c
--- /dev/null
@@ -0,0 +1,130 @@
+From abfbcb3c73da1ad0658f70d7dba917fda6358109 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Feb 2022 20:38:01 +0100
+Subject: fbdev: Improve performance of sys_imageblit()
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 6f29e04938bf509fccfad490a74284cf158891ce ]
+
+Improve the performance of sys_imageblit() by manually unrolling
+the inner blitting loop and moving some invariants out. The compiler
+failed to do this automatically. The resulting binary code was even
+slower than the cfb_imageblit() helper, which uses the same algorithm,
+but operates on I/O memory.
+
+A microbenchmark measures the average number of CPU cycles
+for sys_imageblit() after a stabilizing period of a few minutes
+(i7-4790, FullHD, simpledrm, kernel with debugging). The value
+for CFB is given as a reference.
+
+  sys_imageblit(), new: 25934 cycles
+  sys_imageblit(), old: 35944 cycles
+  cfb_imageblit():      30566 cycles
+
+In the optimized case, sys_imageblit() is now ~30% faster than before
+and ~20% faster than cfb_imageblit().
+
+v2:
+       * move switch out of inner loop (Gerd)
+       * remove test for alignment of dst1 (Sam)
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220223193804.18636-3-tzimmermann@suse.de
+Stable-dep-of: c2d22806aecb ("fbdev: fix potential OOB read in fast_imageblit()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/sysimgblt.c | 49 +++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index a4d05b1b17d7d..722c327a381bd 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+       u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+       u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+-      u32 bit_mask, end_mask, eorx, shift;
++      u32 bit_mask, eorx;
+       const char *s = image->data, *src;
+       u32 *dst;
+-      const u32 *tab = NULL;
++      const u32 *tab;
++      size_t tablen;
++      u32 colortab[16];
+       int i, j, k;
+       switch (bpp) {
+       case 8:
+               tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
++              tablen = 16;
+               break;
+       case 16:
+               tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
++              tablen = 4;
+               break;
+       case 32:
+-      default:
+               tab = cfb_tab32;
++              tablen = 2;
+               break;
++      default:
++              return;
+       }
+       for (i = ppw-1; i--; ) {
+@@ -218,19 +224,40 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+       eorx = fgx ^ bgx;
+       k = image->width/ppw;
++      for (i = 0; i < tablen; ++i)
++              colortab[i] = (tab[i] & eorx) ^ bgx;
++
+       for (i = image->height; i--; ) {
+               dst = dst1;
+-              shift = 8;
+               src = s;
+-              for (j = k; j--; ) {
+-                      shift -= ppw;
+-                      end_mask = tab[(*src >> shift) & bit_mask];
+-                      *dst++ = (end_mask & eorx) ^ bgx;
+-                      if (!shift) {
+-                              shift = 8;
+-                              src++;
++              switch (ppw) {
++              case 4: /* 8 bpp */
++                      for (j = k; j; j -= 2, ++src) {
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
++                      }
++                      break;
++              case 2: /* 16 bpp */
++                      for (j = k; j; j -= 4, ++src) {
++                              *dst++ = colortab[(*src >> 6) & bit_mask];
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 2) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
++                      }
++                      break;
++              case 1: /* 32 bpp */
++                      for (j = k; j; j -= 8, ++src) {
++                              *dst++ = colortab[(*src >> 7) & bit_mask];
++                              *dst++ = colortab[(*src >> 6) & bit_mask];
++                              *dst++ = colortab[(*src >> 5) & bit_mask];
++                              *dst++ = colortab[(*src >> 4) & bit_mask];
++                              *dst++ = colortab[(*src >> 3) & bit_mask];
++                              *dst++ = colortab[(*src >> 2) & bit_mask];
++                              *dst++ = colortab[(*src >> 1) & bit_mask];
++                              *dst++ = colortab[(*src >> 0) & bit_mask];
+                       }
++                      break;
+               }
+               dst1 += p->fix.line_length;
+               s += spitch;
+-- 
+2.40.1
+
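The precomputed colortab in the patch above relies on a bit-select identity:
for a pixel mask m, (m & (fg ^ bg)) ^ bg yields the foreground colour where m
has ones and the background colour where it has zeros, so each table entry can
be computed once per blit instead of redoing the masking for every destination
word. A small stand-alone check of that identity, offered only as an
illustrative sketch and not part of the queued patch (the blend() helper name
is invented for the example):

  #include <assert.h>
  #include <stdint.h>

  /* Select fg where mask bits are set and bg elsewhere, using the
   * single-XOR form that the patch precomputes into colortab[]. */
  static uint32_t blend(uint32_t mask, uint32_t fg, uint32_t bg)
  {
          return (mask & (fg ^ bg)) ^ bg;
  }

  int main(void)
  {
          uint32_t fg = 0x00345678, bg = 0x00abcdef;

          assert(blend(0xffffffff, fg, bg) == fg);  /* all-ones mask -> fg */
          assert(blend(0x00000000, fg, bg) == bg);  /* all-zero mask -> bg */
          assert(blend(0x0000ffff, fg, bg) ==
                 ((fg & 0x0000ffff) | (bg & 0xffff0000)));  /* per-bit select */
          return 0;
  }
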
diff --git a/queue-5.4/fs-dlm-add-pid-to-debug-log.patch b/queue-5.4/fs-dlm-add-pid-to-debug-log.patch
new file mode 100644 (file)
index 0000000..43ad850
--- /dev/null
@@ -0,0 +1,39 @@
+From f2c4d7953511ecc6ede8c461197c92e57b98c221 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:06 -0400
+Subject: fs: dlm: add pid to debug log
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 19d7ca051d303622c423b4cb39e6bde5d177328b ]
+
+This patch adds the pid of the process that requested the lock operation
+to the debug log output.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 95f4662c1209a..f685d56a4f909 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,9 +164,9 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
+-              log_print("%s: wait interrupted %x %llx, op removed",
++              log_print("%s: wait interrupted %x %llx pid %d, op removed",
+                         __func__, ls->ls_global_id,
+-                        (unsigned long long)number);
++                        (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+               do_unlock_close(ls, number, file, fl);
+               goto out;
+-- 
+2.40.1
+
diff --git a/queue-5.4/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch b/queue-5.4/fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
new file mode 100644 (file)
index 0000000..e4d8b13
--- /dev/null
@@ -0,0 +1,46 @@
+From c309ba92755b3b725251f4356c9df4c019210b8d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:05 -0400
+Subject: fs: dlm: change plock interrupted message to debug again
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit ea06d4cabf529eefbe7e89e3a8325f1f89355ccd ]
+
+This patch reverses commit bcfad4265ced ("dlm: improve plock logging
+if interrupted") by moving it to debug level and notifying the user that
+an op was removed.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f685d56a4f909..0d00ca2c44c71 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -164,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+               spin_lock(&ops_lock);
+               list_del(&op->list);
+               spin_unlock(&ops_lock);
+-              log_print("%s: wait interrupted %x %llx pid %d, op removed",
++              log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+                         __func__, ls->ls_global_id,
+                         (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+@@ -470,7 +470,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+               else
+                       wake_up(&recv_wq);
+       } else
+-              log_print("%s: no op %x %llx - may got interrupted?", __func__,
++              log_print("%s: no op %x %llx", __func__,
+                         info.fsid, (unsigned long long)info.number);
+       return count;
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.4/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch b/queue-5.4/fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
new file mode 100644 (file)
index 0000000..36c5447
--- /dev/null
@@ -0,0 +1,109 @@
+From 6b31f01e29cb296b09df3e6f0207ce4c1181832c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 May 2023 12:02:04 -0400
+Subject: fs: dlm: fix mismatch of plock results from userspace
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 57e2c2f2d94cfd551af91cedfa1af6d972487197 ]
+
+When a waiting plock request (F_SETLKW) is sent to userspace
+for processing (dlm_controld), the result is returned at a
+later time. That result could be incorrectly matched to a
+different waiting request in cases where the owner field is
+the same (e.g. different threads in a process.) This is fixed
+by comparing all the properties in the request and reply.
+
+The results for non-waiting plock requests are now matched
+based on list order because the results are returned in the
+same order they were sent.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 58 +++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 13 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index fa8969c0a5f55..28735e8c5e206 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -405,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+               if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+                       list_del(&op->list);
+               else
+-                      list_move(&op->list, &recv_list);
++                      list_move_tail(&op->list, &recv_list);
+               memcpy(&info, &op->info, sizeof(info));
+       }
+       spin_unlock(&ops_lock);
+@@ -443,20 +443,52 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+       if (check_version(&info))
+               return -EINVAL;
++      /*
++       * The results for waiting ops (SETLKW) can be returned in any
++       * order, so match all fields to find the op.  The results for
++       * non-waiting ops are returned in the order that they were sent
++       * to userspace, so match the result with the first non-waiting op.
++       */
+       spin_lock(&ops_lock);
+-      list_for_each_entry(iter, &recv_list, list) {
+-              if (iter->info.fsid == info.fsid &&
+-                  iter->info.number == info.number &&
+-                  iter->info.owner == info.owner) {
+-                      list_del_init(&iter->list);
+-                      memcpy(&iter->info, &info, sizeof(info));
+-                      if (iter->data)
+-                              do_callback = 1;
+-                      else
+-                              iter->done = 1;
+-                      op = iter;
+-                      break;
++      if (info.wait) {
++              list_for_each_entry(iter, &recv_list, list) {
++                      if (iter->info.fsid == info.fsid &&
++                          iter->info.number == info.number &&
++                          iter->info.owner == info.owner &&
++                          iter->info.pid == info.pid &&
++                          iter->info.start == info.start &&
++                          iter->info.end == info.end &&
++                          iter->info.ex == info.ex &&
++                          iter->info.wait) {
++                              op = iter;
++                              break;
++                      }
+               }
++      } else {
++              list_for_each_entry(iter, &recv_list, list) {
++                      if (!iter->info.wait) {
++                              op = iter;
++                              break;
++                      }
++              }
++      }
++
++      if (op) {
++              /* Sanity check that op and info match. */
++              if (info.wait)
++                      WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++              else
++                      WARN_ON(op->info.fsid != info.fsid ||
++                              op->info.number != info.number ||
++                              op->info.owner != info.owner ||
++                              op->info.optype != info.optype);
++
++              list_del_init(&op->list);
++              memcpy(&op->info, &info, sizeof(info));
++              if (op->data)
++                      do_callback = 1;
++              else
++                      op->done = 1;
+       }
+       spin_unlock(&ops_lock);
+-- 
+2.40.1
+
diff --git a/queue-5.4/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch b/queue-5.4/fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
new file mode 100644 (file)
index 0000000..9c837c8
--- /dev/null
@@ -0,0 +1,66 @@
+From 7e27fe9e5a30a2bc28ac1ea77414fa21b954f4c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jun 2022 14:45:08 -0400
+Subject: fs: dlm: use dlm_plock_info for do_unlock_close
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 4d413ae9ced4180c0e2114553c3a7560b509b0f8 ]
+
+This patch refactors do_unlock_close() by using only struct dlm_plock_info
+as a parameter.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 57e2c2f2d94c ("fs: dlm: fix mismatch of plock results from userspace")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 0d00ca2c44c71..fa8969c0a5f55 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
+    abandoned waiter.  So, we have to insert the unlock-close when the
+    lock call is interrupted. */
+-static void do_unlock_close(struct dlm_ls *ls, u64 number,
+-                          struct file *file, struct file_lock *fl)
++static void do_unlock_close(const struct dlm_plock_info *info)
+ {
+       struct plock_op *op;
+@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+               return;
+       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
+-      op->info.pid            = fl->fl_pid;
+-      op->info.fsid           = ls->ls_global_id;
+-      op->info.number         = number;
++      op->info.pid            = info->pid;
++      op->info.fsid           = info->fsid;
++      op->info.number         = info->number;
+       op->info.start          = 0;
+       op->info.end            = OFFSET_MAX;
+-      if (fl->fl_lmops && fl->fl_lmops->lm_grant)
+-              op->info.owner  = (__u64) fl->fl_pid;
+-      else
+-              op->info.owner  = (__u64)(long) fl->fl_owner;
++      op->info.owner          = info->owner;
+       op->info.flags |= DLM_PLOCK_FL_CLOSE;
+       send_op(op);
+@@ -168,7 +164,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+                         __func__, ls->ls_global_id,
+                         (unsigned long long)number, op->info.pid);
+               dlm_release_plock_op(op);
+-              do_unlock_close(ls, number, file, fl);
++              do_unlock_close(&op->info);
+               goto out;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.4/igb-avoid-starting-unnecessary-workqueues.patch b/queue-5.4/igb-avoid-starting-unnecessary-workqueues.patch
new file mode 100644 (file)
index 0000000..35d43b7
--- /dev/null
@@ -0,0 +1,91 @@
+From 41a177f06c4d850cb3f5754d3dc43f4b14df15c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Aug 2023 10:19:27 -0700
+Subject: igb: Avoid starting unnecessary workqueues
+
+From: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+
+[ Upstream commit b888c510f7b3d64ca75fc0f43b4a4bd1a611312f ]
+
+If ptp_clock_register() fails or CONFIG_PTP isn't enabled, avoid starting
+PTP related workqueues.
+
+In this way we can fix this:
+ BUG: unable to handle page fault for address: ffffc9000440b6f8
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 100000067 P4D 100000067 PUD 1001e0067 PMD 107dc5067 PTE 0
+ Oops: 0000 [#1] PREEMPT SMP
+ [...]
+ Workqueue: events igb_ptp_overflow_check
+ RIP: 0010:igb_rd32+0x1f/0x60
+ [...]
+ Call Trace:
+  igb_ptp_read_82580+0x20/0x50
+  timecounter_read+0x15/0x60
+  igb_ptp_overflow_check+0x1a/0x50
+  process_one_work+0x1cb/0x3c0
+  worker_thread+0x53/0x3f0
+  ? rescuer_thread+0x370/0x370
+  kthread+0x142/0x160
+  ? kthread_associate_blkcg+0xc0/0xc0
+  ret_from_fork+0x1f/0x30
+
+Fixes: 1f6e8178d685 ("igb: Prevent dropped Tx timestamps via work items and interrupts.")
+Fixes: d339b1331616 ("igb: add PTP Hardware Clock code")
+Signed-off-by: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+Tested-by: Arpana Arland <arpanax.arland@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20230821171927.2203644-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_ptp.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index c39e921757ba9..3c501c67bdbb6 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1245,18 +1245,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+               return;
+       }
+-      spin_lock_init(&adapter->tmreg_lock);
+-      INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+-      if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+-              INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+-                                igb_ptp_overflow_check);
+-
+-      adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+-      adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+-      igb_ptp_reset(adapter);
+-
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
+       if (IS_ERR(adapter->ptp_clock)) {
+@@ -1266,6 +1254,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+               dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+                        adapter->netdev->name);
+               adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++              spin_lock_init(&adapter->tmreg_lock);
++              INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++              if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++                      INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++                                        igb_ptp_overflow_check);
++
++              adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++              adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++              igb_ptp_reset(adapter);
+       }
+ }
+-- 
+2.40.1
+
diff --git a/queue-5.4/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch b/queue-5.4/ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch
new file mode 100644 (file)
index 0000000..2316c0c
--- /dev/null
@@ -0,0 +1,90 @@
+From ad48fe5022727d724c7b7fef23ec853d15ce963a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 22:54:49 +0800
+Subject: ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
+
+From: Lu Wei <luwei32@huawei.com>
+
+[ Upstream commit 043d5f68d0ccdda91029b4b6dce7eeffdcfad281 ]
+
+There are two network devices (veth1 and veth3) in ns1, and ipvlan1 with
+L3S mode and ipvlan2 with L2 mode are created based on them as shown in
+figure (1). In this case, ipvlan_register_nf_hook() will be called to
+register the nf hook needed by ipvlans in L3S mode in ns1, and the value
+of ipvl_nf_hook_refcnt is set to 1.
+
+(1)
+           ns1                           ns2
+      ------------                  ------------
+
+   veth1--ipvlan1 (L3S)
+
+   veth3--ipvlan2 (L2)
+
+(2)
+           ns1                           ns2
+      ------------                  ------------
+
+   veth1--ipvlan1 (L3S)
+
+         ipvlan2 (L2)                  veth3
+     |                                  |
+     |------->-------->--------->--------
+                    migrate
+
+When veth3 migrates from ns1 to ns2 as shown in figure (2), veth3 registers
+in ns2 and calls call_netdevice_notifiers with the NETDEV_REGISTER event:
+
+dev_change_net_namespace
+    call_netdevice_notifiers
+        ipvlan_device_event
+            ipvlan_migrate_l3s_hook
+                ipvlan_register_nf_hook(newnet)      (I)
+                ipvlan_unregister_nf_hook(oldnet)    (II)
+
+In function ipvlan_migrate_l3s_hook(), ipvl_nf_hook_refcnt in ns1 is not 0
+since veth1 with ipvlan1 is still in ns1, so (I) and (II) will be called to
+register the nf_hook in ns2 and unregister it in ns1. As a result,
+ipvl_nf_hook_refcnt in ns1 is decreased incorrectly and the one in ns2
+is increased incorrectly. When the second net namespace is removed, a
+reference count leak warning in ipvlan_ns_exit() will be triggered.
+
+This patch adds a check before ipvlan_migrate_l3s_hook() is called. The
+warning can be triggered as follows:
+
+$ ip netns add ns1
+$ ip netns add ns2
+$ ip netns exec ns1 ip link add veth1 type veth peer name veth2
+$ ip netns exec ns1 ip link add veth3 type veth peer name veth4
+$ ip netns exec ns1 ip link add ipv1 link veth1 type ipvlan mode l3s
+$ ip netns exec ns1 ip link add ipv2 link veth3 type ipvlan mode l2
+$ ip netns exec ns1 ip link set veth3 netns ns2
+$ ip net del ns2
+
+Fixes: 3133822f5ac1 ("ipvlan: use pernet operations and restrict l3s hooks to master netns")
+Signed-off-by: Lu Wei <luwei32@huawei.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Link: https://lore.kernel.org/r/20230817145449.141827-1-luwei32@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 5fbabae2909ee..5fea2e4a93101 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -735,7 +735,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
+               write_pnet(&port->pnet, newnet);
+-              ipvlan_migrate_l3s_hook(oldnet, newnet);
++              if (port->mode == IPVLAN_MODE_L3S)
++                      ipvlan_migrate_l3s_hook(oldnet, newnet);
+               break;
+       }
+       case NETDEV_UNREGISTER:
+-- 
+2.40.1
+
diff --git a/queue-5.4/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch b/queue-5.4/mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
new file mode 100644 (file)
index 0000000..1826771
--- /dev/null
@@ -0,0 +1,56 @@
+From 348e1e05f02a713331420bfda42758d3cd6751df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Apr 2023 10:33:44 +0100
+Subject: MIPS: cpu-features: Enable octeon_cache by cpu_type
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit f641519409a73403ee6612b8648b95a688ab85c2 ]
+
+cpu_has_octeon_cache was tied to 0 for generic cpu-features;
+with this, a generic kernel built for an Octeon CPU won't boot.
+
+Just enable this flag by cpu_type. It won't hurt other platforms
+because the compiler will eliminate the code path on other processors.
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Stable-dep-of: 5487a7b60695 ("MIPS: cpu-features: Use boot_cpu_type for CPU type based features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 3e26b0c7391b8..b72887e033082 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -124,7 +124,24 @@
+ #define cpu_has_tx39_cache    __opt(MIPS_CPU_TX39_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache  0
++#define cpu_has_octeon_cache                                          \
++({                                                                    \
++      int __res;                                                      \
++                                                                      \
++      switch (current_cpu_type()) {                                   \
++      case CPU_CAVIUM_OCTEON:                                         \
++      case CPU_CAVIUM_OCTEON_PLUS:                                    \
++      case CPU_CAVIUM_OCTEON2:                                        \
++      case CPU_CAVIUM_OCTEON3:                                        \
++              __res = 1;                                              \
++              break;                                                  \
++                                                                      \
++      default:                                                        \
++              __res = 0;                                              \
++      }                                                               \
++                                                                      \
++      __res;                                                          \
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work.  */
+ #ifndef cpu_has_fpu
+-- 
+2.40.1
+
diff --git a/queue-5.4/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch b/queue-5.4/mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
new file mode 100644 (file)
index 0000000..f6c1fa2
--- /dev/null
@@ -0,0 +1,51 @@
+From c18646987d64962ef3fc5ac3e6c3dc830e66b6be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 13:51:22 +0800
+Subject: MIPS: cpu-features: Use boot_cpu_type for CPU type based features
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit 5487a7b60695a92cf998350e4beac17144c91fcd ]
+
+Some CPU feature macros were using current_cpu_type to mark feature
+availability.
+
+However, current_cpu_type will use smp_processor_id, which is prohibited
+in preemptible context.
+
+Since those features are all uniform on all CPUs in an SMP system, use
+boot_cpu_type instead of current_cpu_type to fix preemptible kernels.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index b72887e033082..ae4a2f52e3c4d 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -128,7 +128,7 @@
+ ({                                                                    \
+       int __res;                                                      \
+                                                                       \
+-      switch (current_cpu_type()) {                                   \
++      switch (boot_cpu_type()) {                                      \
+       case CPU_CAVIUM_OCTEON:                                         \
+       case CPU_CAVIUM_OCTEON_PLUS:                                    \
+       case CPU_CAVIUM_OCTEON2:                                        \
+@@ -358,7 +358,7 @@
+ ({                                                                    \
+       int __res;                                                      \
+                                                                       \
+-      switch (current_cpu_type()) {                                   \
++      switch (boot_cpu_type()) {                                      \
+       case CPU_M14KC:                                                 \
+       case CPU_74K:                                                   \
+       case CPU_1074K:                                                 \
+-- 
+2.40.1
+
diff --git a/queue-5.4/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch b/queue-5.4/net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch
new file mode 100644 (file)
index 0000000..1909286
--- /dev/null
@@ -0,0 +1,38 @@
+From 8b754fde915f7634cdc8ccb0c42a8a0039e04874 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:21 +0800
+Subject: net: bcmgenet: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 32bbe64a1386065ab2aef8ce8cae7c689d0add6e ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: b0ba512e25d7 ("net: bcmgenet: enable driver to work without a device tree")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Acked-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 53495d39cc9c5..2fbec2acb606d 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -565,7 +565,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+               };
+               phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-              if (!phydev || IS_ERR(phydev)) {
++              if (IS_ERR(phydev)) {
+                       dev_err(kdev, "failed to register fixed PHY device\n");
+                       return -ENODEV;
+               }
+-- 
+2.40.1
+
diff --git a/queue-5.4/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch b/queue-5.4/net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch
new file mode 100644 (file)
index 0000000..27abd02
--- /dev/null
@@ -0,0 +1,38 @@
+From 34b3177afa36e35c3cd315727ff235ad5b959fea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 13:12:20 +0800
+Subject: net: bgmac: Fix return value check for fixed_phy_register()
+
+From: Ruan Jinjie <ruanjinjie@huawei.com>
+
+[ Upstream commit 23a14488ea5882dea5851b65c9fce2127ee8fcad ]
+
+The fixed_phy_register() function returns error pointers and never
+returns NULL. Update the checks accordingly.
+
+Fixes: c25b23b8a387 ("bgmac: register fixed PHY for ARM BCM470X / BCM5301X chipsets")
+Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bgmac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 89a63fdbe0e39..1148370e2432d 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1447,7 +1447,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
+       int err;
+       phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-      if (!phy_dev || IS_ERR(phy_dev)) {
++      if (IS_ERR(phy_dev)) {
+               dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
+               return -ENODEV;
+       }
+-- 
+2.40.1
+
diff --git a/queue-5.4/net-remove-bond_slave_has_mac_rcu.patch b/queue-5.4/net-remove-bond_slave_has_mac_rcu.patch
new file mode 100644 (file)
index 0000000..78f95f4
--- /dev/null
@@ -0,0 +1,47 @@
+From e071a24e10fdae2a221a734adb5dbc64bd583e20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jan 2022 11:10:59 -0800
+Subject: net: remove bond_slave_has_mac_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8b0fdcdc3a7d44aff907f0103f5ffb86b12bfe71 ]
+
+No caller since v3.16.
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e74216b8def3 ("bonding: fix macvlan over alb bond support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bonding.h | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index a3698f0fb2a6d..4e1e589aae057 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -685,20 +685,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+       return NULL;
+ }
+-/* Caller must hold rcu_read_lock() for read */
+-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+-                                             const u8 *mac)
+-{
+-      struct list_head *iter;
+-      struct slave *tmp;
+-
+-      bond_for_each_slave_rcu(bond, tmp, iter)
+-              if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+-                      return tmp;
+-
+-      return NULL;
+-}
+-
+ /* Caller must hold rcu_read_lock() for read */
+ static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+ {
+-- 
+2.40.1
+
diff --git a/queue-5.4/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch b/queue-5.4/net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
new file mode 100644 (file)
index 0000000..4ef2e6f
--- /dev/null
@@ -0,0 +1,138 @@
+From 0492f7bf000cdfc3055b7eea7c7d19959d81ffb1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Aug 2023 06:12:31 -0400
+Subject: net/sched: fix a qdisc modification with ambiguous command request
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit da71714e359b64bd7aab3bd56ec53f307f058133 ]
+
+When replacing an existing root qdisc with one that is of the same kind, the
+request boils down to essentially a parameterization change, i.e. not one that
+requires allocation and grafting of a new qdisc. syzbot was able to create a
+scenario in which a taprio qdisc replaced an existing taprio qdisc with a
+combination of NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL, leading to a
+create-and-graft scenario.
+The fix ensures that a create and graft is only allowed when the qdisc kinds
+are different; otherwise the request goes into the "change" codepath.
+
+While at it, fix the code and comments to improve readability.
+
+While syzbot was able to create the issue, it did not zero in on the root cause.
+Analysis from Vladimir Oltean <vladimir.oltean@nxp.com> helped narrow it down.
+
+v1->v2 changes:
+- remove "inline" function definition (Vladimir)
+- remove extraneous braces in branches (Vladimir)
+- change inline function names (Pedro)
+- run tdc tests (Victor)
+v2->v3 changes:
+- don't break else/if (Simon)
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+a3618a167af2021433cd@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/20230816225759.g25x76kmgzya2gei@skbuf/T/
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Tested-by: Victor Nogueira <victor@mojatatu.com>
+Reviewed-by: Pedro Tammela <pctammela@mojatatu.com>
+Reviewed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 53 ++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 40 insertions(+), 13 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 6ca0cba8aad16..d07146a2d0bba 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1503,10 +1503,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+       return 0;
+ }
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++      return (n->nlmsg_flags & NLM_F_CREATE &&
++              n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++      return (n->nlmsg_flags & NLM_F_CREATE &&
++              n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++      return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++              !(n->nlmsg_flags & NLM_F_REPLACE) &&
++              !(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+  * Create/change qdisc.
+  */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                          struct netlink_ext_ack *extack)
+ {
+@@ -1603,27 +1621,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+                                *
+                                *   We know, that some child q is already
+                                *   attached to this parent and have choice:
+-                               *   either to change it or to create/graft new one.
++                               *   1) change it or 2) create/graft new one.
++                               *   If the requested qdisc kind is different
++                               *   than the existing one, then we choose graft.
++                               *   If they are the same then this is "change"
++                               *   operation - just let it fallthrough..
+                                *
+                                *   1. We are allowed to create/graft only
+-                               *   if CREATE and REPLACE flags are set.
++                               *   if the request is explicitly stating
++                               *   "please create if it doesn't exist".
+                                *
+-                               *   2. If EXCL is set, requestor wanted to say,
+-                               *   that qdisc tcm_handle is not expected
++                               *   2. If the request is to exclusive create
++                               *   then the qdisc tcm_handle is not expected
+                                *   to exist, so that we choose create/graft too.
+                                *
+                                *   3. The last case is when no flags are set.
++                               *   This will happen when for example tc
++                               *   utility issues a "change" command.
+                                *   Alas, it is sort of hole in API, we
+                                *   cannot decide what to do unambiguously.
+-                               *   For now we select create/graft, if
+-                               *   user gave KIND, which does not match existing.
++                               *   For now we select create/graft.
+                                */
+-                              if ((n->nlmsg_flags & NLM_F_CREATE) &&
+-                                  (n->nlmsg_flags & NLM_F_REPLACE) &&
+-                                  ((n->nlmsg_flags & NLM_F_EXCL) ||
+-                                   (tca[TCA_KIND] &&
+-                                    nla_strcmp(tca[TCA_KIND], q->ops->id))))
+-                                      goto create_n_graft;
++                              if (tca[TCA_KIND] &&
++                                  nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++                                      if (req_create_or_replace(n) ||
++                                          req_create_exclusive(n))
++                                              goto create_n_graft;
++                                      else if (req_change(n))
++                                              goto create_n_graft2;
++                              }
+                       }
+               }
+       } else {
+@@ -1657,6 +1683,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+               NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+               return -ENOENT;
+       }
++create_n_graft2:
+       if (clid == TC_H_INGRESS) {
+               if (dev_ingress_queue(dev)) {
+                       q = qdisc_create(dev, dev_ingress_queue(dev), p,
+-- 
+2.40.1
+
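For context on the flag combinations discussed above, the request shapes that the patch's helpers distinguish map onto the netlink flags iproute2 conventionally sends; the mapping of tc sub-commands to flags and the helper below are background illustration, not part of the patch.

    #include <linux/netlink.h>
    #include <linux/types.h>

    /*
     * tc qdisc add     -> NLM_F_CREATE | NLM_F_EXCL      (req_create_exclusive)
     * tc qdisc replace -> NLM_F_CREATE | NLM_F_REPLACE   (req_create_or_replace)
     * tc qdisc change  -> no flags                       (req_change)
     *
     * With the fix, any of these can create and graft a new qdisc only when
     * TCA_KIND differs from the qdisc already attached; a request naming the
     * same kind always takes the parameter-change path.
     */
    static bool example_is_flagless_change(const struct nlmsghdr *n)
    {
            /* illustrative equivalent of the patch's req_change() */
            return !(n->nlmsg_flags & (NLM_F_CREATE | NLM_F_REPLACE | NLM_F_EXCL));
    }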
diff --git a/queue-5.4/net-validate-veth-and-vxcan-peer-ifindexes.patch b/queue-5.4/net-validate-veth-and-vxcan-peer-ifindexes.patch
new file mode 100644 (file)
index 0000000..d14f4f4
--- /dev/null
@@ -0,0 +1,137 @@
+From ccc2fafed9621a4ca9b4e139efec7d4438751d60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 18:26:02 -0700
+Subject: net: validate veth and vxcan peer ifindexes
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit f534f6581ec084fe94d6759f7672bd009794b07e ]
+
+veth and vxcan need to make sure the ifindexes of the peer
+are not negative; the core does not validate this.
+
+Using iproute2 with user-space-level checking removed:
+
+Before:
+
+  # ./ip link add index 10 type veth peer index -1
+  # ip link show
+  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
+    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+  2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
+    link/ether 52:54:00:74:b2:03 brd ff:ff:ff:ff:ff:ff
+  10: veth1@veth0: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+    link/ether 8a:90:ff:57:6d:5d brd ff:ff:ff:ff:ff:ff
+  -1: veth0@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
+    link/ether ae:ed:18:e6:fa:7f brd ff:ff:ff:ff:ff:ff
+
+Now:
+
+  $ ./ip link add index 10 type veth peer index -1
+  Error: ifindex can't be negative.
+
+This problem surfaced in net-next because an explicit WARN()
+was added; the root cause is older.
+
+Fixes: e6f8f1a739b6 ("veth: Allow to create peer link with given ifindex")
+Fixes: a8f820a380a2 ("can: add Virtual CAN Tunnel driver (vxcan)")
+Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/vxcan.c |  7 +------
+ drivers/net/veth.c      |  5 +----
+ include/net/rtnetlink.h |  4 ++--
+ net/core/rtnetlink.c    | 22 ++++++++++++++++++----
+ 4 files changed, 22 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index 282c53ef76d23..1bfede407270d 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
+               nla_peer = data[VXCAN_INFO_PEER];
+               ifmp = nla_data(nla_peer);
+-              err = rtnl_nla_parse_ifla(peer_tb,
+-                                        nla_data(nla_peer) +
+-                                        sizeof(struct ifinfomsg),
+-                                        nla_len(nla_peer) -
+-                                        sizeof(struct ifinfomsg),
+-                                        NULL);
++              err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+               if (err < 0)
+                       return err;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 683425e3a353c..a6445bba4f942 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1255,10 +1255,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+               nla_peer = data[VETH_INFO_PEER];
+               ifmp = nla_data(nla_peer);
+-              err = rtnl_nla_parse_ifla(peer_tb,
+-                                        nla_data(nla_peer) + sizeof(struct ifinfomsg),
+-                                        nla_len(nla_peer) - sizeof(struct ifinfomsg),
+-                                        NULL);
++              err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+               if (err < 0)
+                       return err;
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 4da61c950e931..5c2a73bbfabee 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -166,8 +166,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ int rtnl_delete_link(struct net_device *dev);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-                      struct netlink_ext_ack *exterr);
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++                           struct netlink_ext_ack *exterr);
+ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3eaf7c706b0ec..3dfdf83e6e45f 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2034,13 +2034,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+       return err;
+ }
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-                      struct netlink_ext_ack *exterr)
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++                           struct netlink_ext_ack *exterr)
+ {
+-      return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
++      const struct ifinfomsg *ifmp;
++      const struct nlattr *attrs;
++      size_t len;
++
++      ifmp = nla_data(nla_peer);
++      attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
++      len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
++
++      if (ifmp->ifi_index < 0) {
++              NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
++                                  "ifindex can't be negative");
++              return -EINVAL;
++      }
++
++      return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
+                                   exterr);
+ }
+-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+-- 
+2.40.1
+
diff --git a/queue-5.4/octeontx2-af-sdp-fix-receive-link-config.patch b/queue-5.4/octeontx2-af-sdp-fix-receive-link-config.patch
new file mode 100644 (file)
index 0000000..8dc8c9a
--- /dev/null
@@ -0,0 +1,45 @@
+From 5ee3e83c4e1d3b20092887b54554693a53b9455b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 12:00:06 +0530
+Subject: octeontx2-af: SDP: fix receive link config
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 05f3d5bc23524bed6f043dfe6b44da687584f9fb ]
+
+On SDP interfaces, frame oversize and undersize errors are
+observed because the driver does not consider the packet sizes of
+all subscribers of the link before updating the link config.
+
+This patch fixes that.
+
+Fixes: 9b7dd87ac071 ("octeontx2-af: Support to modify min/max allowed packet lengths")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Link: https://lore.kernel.org/r/20230817063006.10366-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 4a7609fd6dd07..5bc54ba68c831 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2430,9 +2430,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+       if (link < 0)
+               return NIX_AF_ERR_RX_LINK_INVALID;
+-      nix_find_link_frs(rvu, req, pcifunc);
+ linkcfg:
++      nix_find_link_frs(rvu, req, pcifunc);
++
+       cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+       cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+       if (req->update_minlen)
+-- 
+2.40.1
+
diff --git a/queue-5.4/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch b/queue-5.4/pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
new file mode 100644 (file)
index 0000000..6be2995
--- /dev/null
@@ -0,0 +1,83 @@
+From 7bc5dc1090062f5566763705b12c99c947027a90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 21:15:57 +0200
+Subject: PCI: acpiphp: Reassign resources on bridge if necessary
+
+From: Igor Mammedov <imammedo@redhat.com>
+
+[ Upstream commit 40613da52b13fb21c5566f10b287e0ca8c12c4e9 ]
+
+When using ACPI PCI hotplug, hotplugging a device with large BARs may fail
+if bridge windows programmed by firmware are not large enough.
+
+Reproducer:
+  $ qemu-kvm -monitor stdio -M q35 -m 4G \
+      -global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=on \
+      -device pcie-root-port,id=rp1,bus=pcie.0,chassis=4 \
+      disk_image
+
+ wait till the Linux guest boots, then hotplug the device:
+   (qemu) device_add qxl,bus=rp1
+
+ hotplug on guest side fails with:
+   pci 0000:01:00.0: [1b36:0100] type 00 class 0x038000
+   pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x03ffffff]
+   pci 0000:01:00.0: reg 0x14: [mem 0x00000000-0x03ffffff]
+   pci 0000:01:00.0: reg 0x18: [mem 0x00000000-0x00001fff]
+   pci 0000:01:00.0: reg 0x1c: [io  0x0000-0x001f]
+   pci 0000:01:00.0: BAR 0: no space for [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 1: no space for [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x04000000]
+   pci 0000:01:00.0: BAR 2: assigned [mem 0xfe800000-0xfe801fff]
+   pci 0000:01:00.0: BAR 3: assigned [io  0x1000-0x101f]
+   qxl 0000:01:00.0: enabling device (0000 -> 0003)
+   Unable to create vram_mapping
+   qxl: probe of 0000:01:00.0 failed with error -12
+
+However, when using native PCIe hotplug
+  '-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off'
+it works fine, since the kernel attempts to reassign unused resources.
+
+Use the same machinery as native PCIe hotplug to (re)assign resources.
+
+Link: https://lore.kernel.org/r/20230424191557.2464760-1-imammedo@redhat.com
+Signed-off-by: Igor Mammedov <imammedo@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Rafael J. Wysocki <rafael@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/acpiphp_glue.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 98be06ac2af24..8a0f2bf888536 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -496,7 +496,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+                               acpiphp_native_scan_bridge(dev);
+               }
+       } else {
+-              LIST_HEAD(add_list);
+               int max, pass;
+               acpiphp_rescan_slot(slot);
+@@ -510,12 +509,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+                               if (pass && dev->subordinate) {
+                                       check_hotplug_bridge(slot, dev);
+                                       pcibios_resource_survey_bus(dev->subordinate);
+-                                      __pci_bus_size_bridges(dev->subordinate,
+-                                                             &add_list);
+                               }
+                       }
+               }
+-              __pci_bus_assign_resources(bus, &add_list, NULL);
++              pci_assign_unassigned_bridge_resources(bus->self);
+       }
+       acpiphp_sanitize_bus(bus);
+-- 
+2.40.1
+
diff --git a/queue-5.4/regmap-account-for-register-length-in-smbus-i-o-limi.patch b/queue-5.4/regmap-account-for-register-length-in-smbus-i-o-limi.patch
new file mode 100644 (file)
index 0000000..a9b8c34
--- /dev/null
@@ -0,0 +1,59 @@
+From ff58f056c1f3042d9ee2621e3cf583db75cb9df6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jul 2023 12:16:40 +0100
+Subject: regmap: Account for register length in SMBus I/O limits
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 0c9d2eb5e94792fe64019008a04d4df5e57625af ]
+
+The SMBus I2C buses have limits on the size of transfers they can do, but
+these limits do not factor in the register length, meaning we may try to do
+a transfer longer than our length limit; the core will not take care of this.
+Future changes will factor this out into the core, but there are a number
+of users that assume the current behaviour, so let's just do something
+conservative here.
+
+This does not take account of padding bits, but practically speaking these
+are very rarely, if ever, used on I2C buses, given that they generally run
+slowly enough that there is no issue.
+
+Cc: stable@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Reviewed-by: Xu Yilun <yilun.xu@intel.com>
+Link: https://lore.kernel.org/r/20230712-regmap-max-transfer-v1-2-80e2aed22e83@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-i2c.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 7f924797be58e..90990c22be41d 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+       .write = regmap_i2c_smbus_i2c_write,
+       .read = regmap_i2c_smbus_i2c_read,
+-      .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+-      .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++      .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
++      .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
+ };
+ static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+       .write = regmap_i2c_smbus_i2c_write_reg16,
+       .read = regmap_i2c_smbus_i2c_read_reg16,
+-      .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+-      .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++      .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
++      .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
+ };
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+-- 
+2.40.1
+
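Spelled out, the arithmetic behind the new limits is simply that an SMBus block transfer moves at most I2C_SMBUS_BLOCK_MAX (32) bytes, and the register bytes marshalled in front of the value have to fit in that same budget, so the raw I/O ceiling drops by the register width. The macro names below are illustrative, not from the patch.

    #include <linux/i2c.h>

    /* 8-bit register address: one byte of the block carries the register */
    #define EXAMPLE_MAX_RAW_REG8    (I2C_SMBUS_BLOCK_MAX - 1)   /* 31 data bytes */

    /* 16-bit register address: two bytes of the block carry the register */
    #define EXAMPLE_MAX_RAW_REG16   (I2C_SMBUS_BLOCK_MAX - 2)   /* 30 data bytes */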
diff --git a/queue-5.4/regmap-i2c-add-16-bit-width-registers-support.patch b/queue-5.4/regmap-i2c-add-16-bit-width-registers-support.patch
new file mode 100644 (file)
index 0000000..3f23716
--- /dev/null
@@ -0,0 +1,118 @@
+From d7cfb93169b14870e3aade2ed9fe4242e4cc2640 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 20:33:58 +0800
+Subject: regmap-i2c: add 16-bit width registers support
+
+From: AceLan Kao <acelan.kao@canonical.com>
+
+[ Upstream commit 82f25bd73c0bee4d29df47007a4f7290695b7db7 ]
+
+This allows data to be accessed through registers with 16-bit
+addresses via the I2C SMBus block functions.
+
+The multi-command sequence of the read function is not safe
+and may read wrong data from another address if other commands
+are sent in between the SMBus commands of the read function.
+
+Read performance:
+   32768 bytes (33 kB, 32 KiB) copied, 11.4869 s, 2.9 kB/s
+Write performance(with 1-byte page):
+   32768 bytes (33 kB, 32 KiB) copied, 129.591 s, 0.3 kB/s
+
+The implementation is inspired by the patch below:
+https://patchwork.ozlabs.org/patch/545292/
+
+v2: add more description of the issue that may be introduced
+    by this commit
+
+Signed-off-by: AceLan Kao <acelan.kao@canonical.com>
+Link: https://lore.kernel.org/r/20200424123358.144850-1-acelan.kao@canonical.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 0c9d2eb5e947 ("regmap: Account for register length in SMBus I/O limits")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap-i2c.c | 61 ++++++++++++++++++++++++++++++++
+ 1 file changed, 61 insertions(+)
+
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index ac9b31c57967d..7f924797be58e 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -246,6 +246,63 @@ static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+       .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+ };
++static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
++                                    size_t count)
++{
++      struct device *dev = context;
++      struct i2c_client *i2c = to_i2c_client(dev);
++
++      if (count < 2)
++              return -EINVAL;
++
++      count--;
++      return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
++                                            (u8 *)data + 1);
++}
++
++static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
++                                   size_t reg_size, void *val,
++                                   size_t val_size)
++{
++      struct device *dev = context;
++      struct i2c_client *i2c = to_i2c_client(dev);
++      int ret, count, len = val_size;
++
++      if (reg_size != 2)
++              return -EINVAL;
++
++      ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
++                                      ((u16 *)reg)[0] >> 8);
++      if (ret < 0)
++              return ret;
++
++      count = 0;
++      do {
++              /* Current Address Read */
++              ret = i2c_smbus_read_byte(i2c);
++              if (ret < 0)
++                      break;
++
++              *((u8 *)val++) = ret;
++              count++;
++              len--;
++      } while (len > 0);
++
++      if (count == val_size)
++              return 0;
++      else if (ret < 0)
++              return ret;
++      else
++              return -EIO;
++}
++
++static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
++      .write = regmap_i2c_smbus_i2c_write_reg16,
++      .read = regmap_i2c_smbus_i2c_read_reg16,
++      .max_raw_read = I2C_SMBUS_BLOCK_MAX,
++      .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++};
++
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+                                       const struct regmap_config *config)
+ {
+@@ -255,6 +312,10 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+                i2c_check_functionality(i2c->adapter,
+                                        I2C_FUNC_SMBUS_I2C_BLOCK))
+               return &regmap_i2c_smbus_i2c_block;
++      else if (config->val_bits == 8 && config->reg_bits == 16 &&
++              i2c_check_functionality(i2c->adapter,
++                                      I2C_FUNC_SMBUS_I2C_BLOCK))
++              return &regmap_i2c_smbus_i2c_block_reg16;
+       else if (config->val_bits == 16 && config->reg_bits == 8 &&
+                i2c_check_functionality(i2c->adapter,
+                                        I2C_FUNC_SMBUS_WORD_DATA))
+-- 
+2.40.1
+
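A sketch of the kind of client setup this change enables: with reg_bits == 16 and val_bits == 8 on an adapter that only advertises I2C_FUNC_SMBUS_I2C_BLOCK, regmap_get_i2c_bus() now selects the new regmap_i2c_smbus_i2c_block_reg16 bus. The device, register map size and probe helper below are hypothetical, not taken from the patch.

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/regmap.h>

    static const struct regmap_config example_eeprom_config = {
            .reg_bits     = 16,     /* 16-bit register addresses */
            .val_bits     = 8,      /* 8-bit values */
            .max_register = 0x7fff, /* hypothetical device size */
    };

    static int example_setup_regmap(struct i2c_client *client)
    {
            struct regmap *map;

            map = devm_regmap_init_i2c(client, &example_eeprom_config);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            return 0;
    }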
diff --git a/queue-5.4/series b/queue-5.4/series
index 203dff3fa78347c16d887e5fbf53d42a65eabf75..004c71c2b56c63f84e6b043b5e3ad011617fd66d 100644 (file)
@@ -94,3 +94,39 @@ net-fix-the-rto-timer-retransmitting-skb-every-1ms-if-linear-option-is-enabled.p
 net-xfrm-amend-xfrma_sec_ctx-nla_policy-structure.patch
 mmc-f-sdh30-fix-order-of-function-calls-in-sdhci_f_sdh30_remove.patch
 net-phy-broadcom-stub-c45-read-write-for-54810.patch
+pci-acpiphp-reassign-resources-on-bridge-if-necessar.patch
+dlm-improve-plock-logging-if-interrupted.patch
+dlm-replace-usage-of-found-with-dedicated-list-itera.patch
+fs-dlm-add-pid-to-debug-log.patch
+fs-dlm-change-plock-interrupted-message-to-debug-aga.patch
+fs-dlm-use-dlm_plock_info-for-do_unlock_close.patch
+fs-dlm-fix-mismatch-of-plock-results-from-userspace.patch
+mips-cpu-features-enable-octeon_cache-by-cpu_type.patch
+mips-cpu-features-use-boot_cpu_type-for-cpu-type-bas.patch
+fbdev-improve-performance-of-sys_imageblit.patch
+fbdev-fix-sys_imageblit-for-arbitrary-image-widths.patch
+fbdev-fix-potential-oob-read-in-fast_imageblit.patch
+dm-integrity-increase-recalc_sectors-to-improve-reca.patch
+dm-integrity-reduce-vmalloc-space-footprint-on-32-bi.patch
+alsa-pcm-set-per-card-upper-limit-of-pcm-buffer-allo.patch
+alsa-pcm-use-sg-buffer-only-when-direct-dma-is-avail.patch
+alsa-pcm-fix-potential-data-race-at-pcm-memory-alloc.patch
+regmap-i2c-add-16-bit-width-registers-support.patch
+regmap-account-for-register-length-in-smbus-i-o-limi.patch
+asoc-fsl_sai-refine-enable-disable-te-re-sequence-in.patch
+asoc-fsl_sai-add-new-added-registers-and-new-bit-def.patch
+asoc-fsl_sai-disable-bit-clock-with-transmitter.patch
+drm-amd-display-do-not-wait-for-mpc-idle-if-tg-is-di.patch
+drm-amd-display-check-tg-is-non-null-before-checking.patch
+tracing-fix-memleak-due-to-race-between-current_trac.patch
+octeontx2-af-sdp-fix-receive-link-config.patch
+sock-annotate-data-races-around-prot-memory_pressure.patch
+dccp-annotate-data-races-in-dccp_poll.patch
+ipvlan-fix-a-reference-count-leak-warning-in-ipvlan_.patch
+net-bgmac-fix-return-value-check-for-fixed_phy_regis.patch
+net-bcmgenet-fix-return-value-check-for-fixed_phy_re.patch
+net-validate-veth-and-vxcan-peer-ifindexes.patch
+igb-avoid-starting-unnecessary-workqueues.patch
+net-sched-fix-a-qdisc-modification-with-ambiguous-co.patch
+net-remove-bond_slave_has_mac_rcu.patch
+bonding-fix-macvlan-over-alb-bond-support.patch
diff --git a/queue-5.4/sock-annotate-data-races-around-prot-memory_pressure.patch b/queue-5.4/sock-annotate-data-races-around-prot-memory_pressure.patch
new file mode 100644 (file)
index 0000000..8692b5d
--- /dev/null
@@ -0,0 +1,82 @@
+From 6e12e5aa992d6078fdc8b3b8203bc3d97bb4e22d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Aug 2023 01:51:32 +0000
+Subject: sock: annotate data-races around prot->memory_pressure
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 76f33296d2e09f63118db78125c95ef56df438e9 ]
+
+*prot->memory_pressure is read/written locklessly; we need
+to add proper annotations.
+
+A recent commit added a new race, so it is time to audit all accesses.
+
+Fixes: 2d0c88e84e48 ("sock: Fix misuse of sk_under_memory_pressure()")
+Fixes: 4d93df0abd50 ("[SCTP]: Rewrite of sctp buffer management code")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Abel Wu <wuyun.abel@bytedance.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Link: https://lore.kernel.org/r/20230818015132.2699348-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 7 ++++---
+ net/sctp/socket.c  | 2 +-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 61f5872aac24f..f73ef7087a187 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1161,6 +1161,7 @@ struct proto {
+       /*
+        * Pressure flag: try to collapse.
+        * Technical note: it is used by multiple contexts non atomically.
++       * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+        * All the __sk_mem_schedule() is of this nature: accounting
+        * is strict, actions are advisory and have some latency.
+        */
+@@ -1277,7 +1278,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+ {
+       return sk->sk_prot->memory_pressure &&
+-              !!*sk->sk_prot->memory_pressure;
++              !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+@@ -1289,7 +1290,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+           mem_cgroup_under_socket_pressure(sk->sk_memcg))
+               return true;
+-      return !!*sk->sk_prot->memory_pressure;
++      return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ static inline long
+@@ -1343,7 +1344,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+       if (!prot->memory_pressure)
+               return false;
+-      return !!*prot->memory_pressure;
++      return !!READ_ONCE(*prot->memory_pressure);
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 7cff1a031f761..431b9399a781f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+-      sctp_memory_pressure = 1;
++      WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+-- 
+2.40.1
+
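The annotation pattern the patch applies, reduced to a self-contained illustration: a flag that is read and written without a lock is marked on both sides so the compiler cannot tear, fuse or refetch the accesses and KCSAN treats them as intentionally lockless. The flag and helpers below are made up for the example; only the READ_ONCE()/WRITE_ONCE() usage mirrors the fix.

    #include <linux/compiler.h>
    #include <linux/types.h>

    static int example_pressure_flag;

    static void example_enter_pressure(void)
    {
            WRITE_ONCE(example_pressure_flag, 1);           /* lockless writer */
    }

    static bool example_under_pressure(void)
    {
            return !!READ_ONCE(example_pressure_flag);      /* lockless reader */
    }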
diff --git a/queue-5.4/tracing-fix-memleak-due-to-race-between-current_trac.patch b/queue-5.4/tracing-fix-memleak-due-to-race-between-current_trac.patch
new file mode 100644 (file)
index 0000000..81e1610
--- /dev/null
@@ -0,0 +1,122 @@
+From 8f83e4fb04f23b25d8d8eb757bd9368c297ef169 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Aug 2023 20:55:39 +0800
+Subject: tracing: Fix memleak due to race between current_tracer and trace
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit eecb91b9f98d6427d4af5fdb8f108f52572a39e7 ]
+
+Kmemleak report a leak in graph_trace_open():
+
+  unreferenced object 0xffff0040b95f4a00 (size 128):
+    comm "cat", pid 204981, jiffies 4301155872 (age 99771.964s)
+    hex dump (first 32 bytes):
+      e0 05 e7 b4 ab 7d 00 00 0b 00 01 00 00 00 00 00 .....}..........
+      f4 00 01 10 00 a0 ff ff 00 00 00 00 65 00 10 00 ............e...
+    backtrace:
+      [<000000005db27c8b>] kmem_cache_alloc_trace+0x348/0x5f0
+      [<000000007df90faa>] graph_trace_open+0xb0/0x344
+      [<00000000737524cd>] __tracing_open+0x450/0xb10
+      [<0000000098043327>] tracing_open+0x1a0/0x2a0
+      [<00000000291c3876>] do_dentry_open+0x3c0/0xdc0
+      [<000000004015bcd6>] vfs_open+0x98/0xd0
+      [<000000002b5f60c9>] do_open+0x520/0x8d0
+      [<00000000376c7820>] path_openat+0x1c0/0x3e0
+      [<00000000336a54b5>] do_filp_open+0x14c/0x324
+      [<000000002802df13>] do_sys_openat2+0x2c4/0x530
+      [<0000000094eea458>] __arm64_sys_openat+0x130/0x1c4
+      [<00000000a71d7881>] el0_svc_common.constprop.0+0xfc/0x394
+      [<00000000313647bf>] do_el0_svc+0xac/0xec
+      [<000000002ef1c651>] el0_svc+0x20/0x30
+      [<000000002fd4692a>] el0_sync_handler+0xb0/0xb4
+      [<000000000c309c35>] el0_sync+0x160/0x180
+
+The root cause is described as follows:
+
+  __tracing_open() {  // 1. File 'trace' is being opened;
+    ...
+    *iter->trace = *tr->current_trace;  // 2. Tracer 'function_graph' is
+                                        //    currently set;
+    ...
+    iter->trace->open(iter);  // 3. Call graph_trace_open() here,
+                              //    and memory are allocated in it;
+    ...
+  }
+
+  s_start() {  // 4. The opened file is being read;
+    ...
+    *iter->trace = *tr->current_trace;  // 5. If tracer is switched to
+                                        //    'nop' or others, then memory
+                                        //    in step 3 are leaked!!!
+    ...
+  }
+
+To fix it, in s_start(), close the tracer before switching, then reopen the
+new tracer after switching. Also, some tracers like 'wakeup' may not update
+'iter->private' in some cases when reopened, so it should be cleared
+to avoid being mistakenly closed again.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230817125539.1646321-1-zhengyejian1@huawei.com
+
+Fixes: d7350c3f4569 ("tracing/core: make the read callbacks reentrants")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c              | 9 ++++++++-
+ kernel/trace/trace_irqsoff.c      | 3 ++-
+ kernel/trace/trace_sched_wakeup.c | 2 ++
+ 3 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8006592803e1c..ad0ee4de92485 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3499,8 +3499,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+        * will point to the same string as current_trace->name.
+        */
+       mutex_lock(&trace_types_lock);
+-      if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++      if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++              /* Close iter->trace before switching to the new current tracer */
++              if (iter->trace->close)
++                      iter->trace->close(iter);
+               *iter->trace = *tr->current_trace;
++              /* Reopen the new current tracer */
++              if (iter->trace->open)
++                      iter->trace->open(iter);
++      }
+       mutex_unlock(&trace_types_lock);
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index a745b0cee5d32..07557904dab8a 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -228,7 +228,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+       if (is_graph(iter->tr))
+               graph_trace_open(iter);
+-
++      else
++              iter->private = NULL;
+ }
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 617e297f46dcc..7b2d8f776ae25 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -171,6 +171,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+       if (is_graph(iter->tr))
+               graph_trace_open(iter);
++      else
++              iter->private = NULL;
+ }
+ static void wakeup_trace_close(struct trace_iterator *iter)
+-- 
+2.40.1
+