--- /dev/null
+From foo@baz Tue May 10 01:32:04 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Fri, 6 May 2022 12:10:13 +0300
+Subject: ALSA: pcm: Fix potential AB/BA lock with buffer_mutex and mmap_lock
+To: stable@vger.kernel.org
+Cc: tiwai@suse.de, perex@perex.cz, kirin.say@gmail.com
+Message-ID: <20220506091013.1746159-6-ovidiu.panait@windriver.com>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit bc55cfd5718c7c23e5524582e9fa70b4d10f2433 upstream.
+
+syzbot caught a potential deadlock between the PCM
+runtime->buffer_mutex and the mm->mmap_lock. It was introduced by
+the recent fix covering the racy read/write and other ioctls; in
+that commit I overlooked a (hopefully the only) corner case that may
+take the two locks in the reverse order, namely, the OSS mmap. The
+OSS mmap operation exceptionally allows re-configuring the
+parameters inside the OSS mmap syscall, where mm->mmap_lock is
+already held. Meanwhile, the copy_from/to_user calls in the
+read/write operations also take the mm->mmap_lock internally, hence
+this may lead to an AB/BA deadlock.
+
+A similar problem was seen in the past and fixed with a refcount (in
+commit b248371628aa). That former fix covered only the OSS
+read/write and OSS ioctl call paths, while now we need to cover
+concurrent access via both the ALSA and OSS APIs.
+
+This patch addresses the problem above by replacing the buffer_mutex
+lock in the read/write operations with a refcount similar to the one
+used for OSS. The new field, runtime->buffer_accessing, tracks the
+number of concurrent read/write operations. Unlike the former
+buffer_mutex protection, this protects only around the
+copy_from/to_user() calls; the rest of the code is basically
+protected by the PCM stream lock. The refcount can go negative,
+meaning access is blocked by the ioctls. If a negative value is
+seen, the read/write aborts with -EBUSY. The ioctl side, OTOH,
+checks this refcount, too, and sets it to a negative value to block
+further r/w unless the buffer is already being accessed (see the
+sketch after this patch).
+
+Reported-by: syzbot+6e5c88838328e99c7e1c@syzkaller.appspotmail.com
+Fixes: dca947d4d26d ("ALSA: pcm: Fix races among concurrent read/write and buffer changes")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/000000000000381a0d05db622a81@google.com
+Link: https://lore.kernel.org/r/20220330120903.4738-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[OP: backport to 5.4: adjusted context]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/sound/pcm.h | 1 +
+ sound/core/pcm.c | 1 +
+ sound/core/pcm_lib.c | 9 +++++----
+ sound/core/pcm_native.c | 39 ++++++++++++++++++++++++++++++++-------
+ 4 files changed, 39 insertions(+), 11 deletions(-)
+
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -396,6 +396,7 @@ struct snd_pcm_runtime {
+ wait_queue_head_t tsleep; /* transfer sleep */
+ struct fasync_struct *fasync;
+ struct mutex buffer_mutex; /* protect for buffer changes */
++ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
+
+ /* -- private section -- */
+ void *private_data;
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_
+
+ runtime->status->state = SNDRV_PCM_STATE_OPEN;
+ mutex_init(&runtime->buffer_mutex);
++ atomic_set(&runtime->buffer_accessing, 0);
+
+ substream->runtime = runtime;
+ substream->private_data = pcm->private_data;
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1861,11 +1861,9 @@ static int wait_for_avail(struct snd_pcm
+ if (avail >= runtime->twake)
+ break;
+ snd_pcm_stream_unlock_irq(substream);
+- mutex_unlock(&runtime->buffer_mutex);
+
+ tout = schedule_timeout(wait_time);
+
+- mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ set_current_state(TASK_INTERRUPTIBLE);
+ switch (runtime->status->state) {
+@@ -2159,7 +2157,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(str
+
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+
+- mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ err = pcm_accessible_state(runtime);
+ if (err < 0)
+@@ -2214,10 +2211,15 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(str
+ err = -EINVAL;
+ goto _end_unlock;
+ }
++ if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
++ err = -EBUSY;
++ goto _end_unlock;
++ }
+ snd_pcm_stream_unlock_irq(substream);
+ err = writer(substream, appl_ofs, data, offset, frames,
+ transfer);
+ snd_pcm_stream_lock_irq(substream);
++ atomic_dec(&runtime->buffer_accessing);
+ if (err < 0)
+ goto _end_unlock;
+ err = pcm_accessible_state(runtime);
+@@ -2247,7 +2249,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(str
+ if (xfer > 0 && err >= 0)
+ snd_pcm_update_state(substream, runtime);
+ snd_pcm_stream_unlock_irq(substream);
+- mutex_unlock(&runtime->buffer_mutex);
+ return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
+ }
+ EXPORT_SYMBOL(__snd_pcm_lib_xfer);
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -630,6 +630,24 @@ static int snd_pcm_hw_params_choose(stru
+ return 0;
+ }
+
++/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
++ * block the further r/w operations
++ */
++static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
++{
++ if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
++ return -EBUSY;
++ mutex_lock(&runtime->buffer_mutex);
++ return 0; /* keep buffer_mutex, unlocked by below */
++}
++
++/* release buffer_mutex and clear r/w access flag */
++static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
++{
++ mutex_unlock(&runtime->buffer_mutex);
++ atomic_inc(&runtime->buffer_accessing);
++}
++
+ #if IS_ENABLED(CONFIG_SND_PCM_OSS)
+ #define is_oss_stream(substream) ((substream)->oss.oss)
+ #else
+@@ -640,14 +658,16 @@ static int snd_pcm_hw_params(struct snd_
+ struct snd_pcm_hw_params *params)
+ {
+ struct snd_pcm_runtime *runtime;
+- int err = 0, usecs;
++ int err, usecs;
+ unsigned int bits;
+ snd_pcm_uframes_t frames;
+
+ if (PCM_RUNTIME_CHECK(substream))
+ return -ENXIO;
+ runtime = substream->runtime;
+- mutex_lock(&runtime->buffer_mutex);
++ err = snd_pcm_buffer_access_lock(runtime);
++ if (err < 0)
++ return err;
+ snd_pcm_stream_lock_irq(substream);
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_OPEN:
+@@ -752,7 +772,7 @@ static int snd_pcm_hw_params(struct snd_
+ substream->ops->hw_free(substream);
+ }
+ unlock:
+- mutex_unlock(&runtime->buffer_mutex);
++ snd_pcm_buffer_access_unlock(runtime);
+ return err;
+ }
+
+@@ -785,7 +805,9 @@ static int snd_pcm_hw_free(struct snd_pc
+ if (PCM_RUNTIME_CHECK(substream))
+ return -ENXIO;
+ runtime = substream->runtime;
+- mutex_lock(&runtime->buffer_mutex);
++ result = snd_pcm_buffer_access_lock(runtime);
++ if (result < 0)
++ return result;
+ snd_pcm_stream_lock_irq(substream);
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_SETUP:
+@@ -805,7 +827,7 @@ static int snd_pcm_hw_free(struct snd_pc
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ unlock:
+- mutex_unlock(&runtime->buffer_mutex);
++ snd_pcm_buffer_access_unlock(runtime);
+ return result;
+ }
+
+@@ -1221,12 +1243,15 @@ static int snd_pcm_action_nonatomic(cons
+
+ /* Guarantee the group members won't change during non-atomic action */
+ down_read(&snd_pcm_link_rwsem);
+- mutex_lock(&substream->runtime->buffer_mutex);
++ res = snd_pcm_buffer_access_lock(substream->runtime);
++ if (res < 0)
++ goto unlock;
+ if (snd_pcm_stream_linked(substream))
+ res = snd_pcm_action_group(ops, substream, state, 0);
+ else
+ res = snd_pcm_action_single(ops, substream, state);
+- mutex_unlock(&substream->runtime->buffer_mutex);
++ snd_pcm_buffer_access_unlock(substream->runtime);
++ unlock:
+ up_read(&snd_pcm_link_rwsem);
+ return res;
+ }
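The refcount scheme above can be modelled in a few lines of ordinary
C11. This is only a sketch with made-up helper names (rw_access_begin()
and friends), not the kernel code: the r/w side increments the counter
only while it is non-negative, and the ioctl side drives it negative to
fence off new accesses, so neither side ever sleeps on the other while
mm->mmap_lock may be held.

/* Userspace model of runtime->buffer_accessing (C11 atomics).
 * Helper names are illustrative, not the kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int buffer_accessing;     /* >0: in r/w operation, <0: blocked */

/* r/w path: enter a copy section unless an ioctl has blocked access */
static bool rw_access_begin(void)
{
	int v = atomic_load(&buffer_accessing);

	do {
		if (v < 0)
			return false;           /* an ioctl owns the buffer: -EBUSY */
	} while (!atomic_compare_exchange_weak(&buffer_accessing, &v, v + 1));
	return true;
}

static void rw_access_end(void)
{
	atomic_fetch_sub(&buffer_accessing, 1);
}

/* ioctl path: block further r/w unless a copy is already in flight */
static bool buffer_access_block(void)
{
	int v = atomic_load(&buffer_accessing);

	do {
		if (v > 0)
			return false;           /* r/w in flight: -EBUSY */
	} while (!atomic_compare_exchange_weak(&buffer_accessing, &v, v - 1));
	return true;
}

static void buffer_access_unblock(void)
{
	atomic_fetch_add(&buffer_accessing, 1);
}

int main(void)
{
	if (rw_access_begin()) {            /* read/write syscall */
		printf("copy_from/to_user() would run here, without buffer_mutex\n");
		rw_access_end();
	}
	if (buffer_access_block()) {        /* hw_params/hw_free/prepare ioctl */
		printf("buffer may be changed or freed safely\n");
		buffer_access_unblock();
	}
	return 0;
}

In the kernel the two compare-and-swap loops are provided by
atomic_inc_unless_negative() and atomic_dec_unless_positive(), as used
by snd_pcm_buffer_access_lock() in the hunk above.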
--- /dev/null
+From foo@baz Tue May 10 01:32:04 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Fri, 6 May 2022 12:10:09 +0300
+Subject: ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
+To: stable@vger.kernel.org
+Cc: tiwai@suse.de, perex@perex.cz, kirin.say@gmail.com
+Message-ID: <20220506091013.1746159-2-ovidiu.panait@windriver.com>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 92ee3c60ec9fe64404dc035e7c41277d74aa26cb upstream.
+
+Currently we have neither a proper check nor protection against
+concurrent calls of the PCM hw_params and hw_free ioctls, which may
+result in a UAF. Since the existing PCM stream lock can't be used to
+protect the whole ioctl operation, we need a new mutex to protect
+those racy calls.
+
+This patch introduces a new mutex, runtime->buffer_mutex, and
+applies it to both the hw_params and hw_free ioctl code paths.
+Along with that, both functions are slightly modified (the
+mmap_count check is moved into the state-check block) for code
+simplicity. A minimal userspace model of this serialization is
+sketched after this patch.
+
+Reported-by: Hu Jiahui <kirin.say@gmail.com>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Link: https://lore.kernel.org/r/20220322170720.3529-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[OP: backport to 5.4: adjusted context]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/sound/pcm.h | 1
+ sound/core/pcm.c | 2 +
+ sound/core/pcm_native.c | 55 +++++++++++++++++++++++++++++++-----------------
+ 3 files changed, 39 insertions(+), 19 deletions(-)
+
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -395,6 +395,7 @@ struct snd_pcm_runtime {
+ wait_queue_head_t sleep; /* poll sleep */
+ wait_queue_head_t tsleep; /* transfer sleep */
+ struct fasync_struct *fasync;
++ struct mutex buffer_mutex; /* protect for buffer changes */
+
+ /* -- private section -- */
+ void *private_data;
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -969,6 +969,7 @@ int snd_pcm_attach_substream(struct snd_
+ init_waitqueue_head(&runtime->tsleep);
+
+ runtime->status->state = SNDRV_PCM_STATE_OPEN;
++ mutex_init(&runtime->buffer_mutex);
+
+ substream->runtime = runtime;
+ substream->private_data = pcm->private_data;
+@@ -1000,6 +1001,7 @@ void snd_pcm_detach_substream(struct snd
+ substream->runtime = NULL;
+ if (substream->timer)
+ spin_unlock_irq(&substream->timer->lock);
++ mutex_destroy(&runtime->buffer_mutex);
+ kfree(runtime);
+ put_pid(substream->pid);
+ substream->pid = NULL;
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -630,33 +630,40 @@ static int snd_pcm_hw_params_choose(stru
+ return 0;
+ }
+
++#if IS_ENABLED(CONFIG_SND_PCM_OSS)
++#define is_oss_stream(substream) ((substream)->oss.oss)
++#else
++#define is_oss_stream(substream) false
++#endif
++
+ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ struct snd_pcm_runtime *runtime;
+- int err, usecs;
++ int err = 0, usecs;
+ unsigned int bits;
+ snd_pcm_uframes_t frames;
+
+ if (PCM_RUNTIME_CHECK(substream))
+ return -ENXIO;
+ runtime = substream->runtime;
++ mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
++ if (!is_oss_stream(substream) &&
++ atomic_read(&substream->mmap_count))
++ err = -EBADFD;
+ break;
+ default:
+- snd_pcm_stream_unlock_irq(substream);
+- return -EBADFD;
++ err = -EBADFD;
++ break;
+ }
+ snd_pcm_stream_unlock_irq(substream);
+-#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+- if (!substream->oss.oss)
+-#endif
+- if (atomic_read(&substream->mmap_count))
+- return -EBADFD;
++ if (err)
++ goto unlock;
+
+ params->rmask = ~0U;
+ err = snd_pcm_hw_refine(substream, params);
+@@ -733,14 +740,19 @@ static int snd_pcm_hw_params(struct snd_
+ if ((usecs = period_to_usecs(runtime)) >= 0)
+ pm_qos_add_request(&substream->latency_pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY, usecs);
+- return 0;
++ err = 0;
+ _error:
+- /* hardware might be unusable from this time,
+- so we force application to retry to set
+- the correct hardware parameter settings */
+- snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+- if (substream->ops->hw_free != NULL)
+- substream->ops->hw_free(substream);
++ if (err) {
++ /* hardware might be unusable from this time,
++ * so we force application to retry to set
++ * the correct hardware parameter settings
++ */
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
++ if (substream->ops->hw_free != NULL)
++ substream->ops->hw_free(substream);
++ }
++ unlock:
++ mutex_unlock(&runtime->buffer_mutex);
+ return err;
+ }
+
+@@ -773,22 +785,27 @@ static int snd_pcm_hw_free(struct snd_pc
+ if (PCM_RUNTIME_CHECK(substream))
+ return -ENXIO;
+ runtime = substream->runtime;
++ mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
++ if (atomic_read(&substream->mmap_count))
++ result = -EBADFD;
+ break;
+ default:
+- snd_pcm_stream_unlock_irq(substream);
+- return -EBADFD;
++ result = -EBADFD;
++ break;
+ }
+ snd_pcm_stream_unlock_irq(substream);
+- if (atomic_read(&substream->mmap_count))
+- return -EBADFD;
++ if (result)
++ goto unlock;
+ if (substream->ops->hw_free)
+ result = substream->ops->hw_free(substream);
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
++ unlock:
++ mutex_unlock(&runtime->buffer_mutex);
+ return result;
+ }
+
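A minimal userspace model of what the new buffer_mutex buys (the names
and the realloc() stand-in are illustrative, not the driver code): both
buffer-changing paths funnel through one per-runtime mutex, so hw_free
can no longer free a buffer while a racing hw_params is still resizing
it.

/* Userspace sketch of runtime->buffer_mutex serializing buffer changes. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct runtime {
	pthread_mutex_t buffer_mutex;       /* protect for buffer changes */
	char *dma_area;
	size_t dma_bytes;
};

static int hw_params(struct runtime *rt, size_t bytes)
{
	int err = 0;
	char *buf;

	pthread_mutex_lock(&rt->buffer_mutex);
	buf = realloc(rt->dma_area, bytes); /* stands in for the DMA allocator */
	if (!buf) {
		err = -ENOMEM;
		goto unlock;
	}
	rt->dma_area = buf;
	rt->dma_bytes = bytes;
 unlock:
	pthread_mutex_unlock(&rt->buffer_mutex);
	return err;
}

static int hw_free(struct runtime *rt)
{
	pthread_mutex_lock(&rt->buffer_mutex);
	free(rt->dma_area);
	rt->dma_area = NULL;
	rt->dma_bytes = 0;
	pthread_mutex_unlock(&rt->buffer_mutex);
	return 0;
}

int main(void)
{
	struct runtime rt = { .buffer_mutex = PTHREAD_MUTEX_INITIALIZER };

	if (hw_params(&rt, 64 * 1024) == 0) /* racing callers serialize here */
		printf("allocated %zu bytes\n", rt.dma_bytes);
	return hw_free(&rt);
}

The goto-unlock shape mirrors the reworked snd_pcm_hw_params() and
snd_pcm_hw_free() above, where every early return inside the locked
region becomes a jump to the unlock label.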
--- /dev/null
+From foo@baz Tue May 10 01:32:04 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Fri, 6 May 2022 12:10:12 +0300
+Subject: ALSA: pcm: Fix races among concurrent prealloc proc writes
+To: stable@vger.kernel.org
+Cc: tiwai@suse.de, perex@perex.cz, kirin.say@gmail.com
+Message-ID: <20220506091013.1746159-5-ovidiu.panait@windriver.com>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 69534c48ba8ce552ce383b3dfdb271ffe51820c3 upstream.
+
+We have no protection against concurrent PCM buffer preallocation
+changes via proc files, which may potentially lead to a UAF or other
+odd problems. This patch applies the PCM open_mutex to the proc
+write operation, so that the racy proc writes are serialized against
+each other and against the PCM stream open (and the operations that
+follow it); a small sketch of the pattern follows the patch.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Link: https://lore.kernel.org/r/20220322170720.3529-5-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[OP: backport to 5.4: adjusted context]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/pcm_memory.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -133,19 +133,20 @@ static void snd_pcm_lib_preallocate_proc
+ size_t size;
+ struct snd_dma_buffer new_dmab;
+
++ mutex_lock(&substream->pcm->open_mutex);
+ if (substream->runtime) {
+ buffer->error = -EBUSY;
+- return;
++ goto unlock;
+ }
+ if (!snd_info_get_line(buffer, line, sizeof(line))) {
+ snd_info_get_str(str, line, sizeof(str));
+ size = simple_strtoul(str, NULL, 10) * 1024;
+ if ((size != 0 && size < 8192) || size > substream->dma_max) {
+ buffer->error = -EINVAL;
+- return;
++ goto unlock;
+ }
+ if (substream->dma_buffer.bytes == size)
+- return;
++ goto unlock;
+ memset(&new_dmab, 0, sizeof(new_dmab));
+ new_dmab.dev = substream->dma_buffer.dev;
+ if (size > 0) {
+@@ -153,7 +154,7 @@ static void snd_pcm_lib_preallocate_proc
+ substream->dma_buffer.dev.dev,
+ size, &new_dmab) < 0) {
+ buffer->error = -ENOMEM;
+- return;
++ goto unlock;
+ }
+ substream->buffer_bytes_max = size;
+ } else {
+@@ -165,6 +166,8 @@ static void snd_pcm_lib_preallocate_proc
+ } else {
+ buffer->error = -EINVAL;
+ }
++ unlock:
++ mutex_unlock(&substream->pcm->open_mutex);
+ }
+
+ static inline void preallocate_info_init(struct snd_pcm_substream *substream)
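The same take-the-lock-first, single-unlock-exit pattern drives the
proc-write fix above. A rough userspace sketch (hypothetical names and
a plain size field instead of the real preallocated DMA buffer) of one
mutex serializing the writer against the open path:

/* Sketch: serialize a prealloc-size writer against "open" with one mutex. */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct stream {
	pthread_mutex_t open_mutex;
	int opened;                     /* stands in for substream->runtime */
	size_t prealloc_bytes;
	size_t max_bytes;
};

static int prealloc_write(struct stream *s, size_t size)
{
	int err = 0;

	pthread_mutex_lock(&s->open_mutex);
	if (s->opened) {                /* can't resize while the stream is open */
		err = -EBUSY;
		goto unlock;
	}
	if ((size != 0 && size < 8192) || size > s->max_bytes) {
		err = -EINVAL;
		goto unlock;
	}
	s->prealloc_bytes = size;       /* the real code reallocates the buffer */
 unlock:
	pthread_mutex_unlock(&s->open_mutex);
	return err;
}

int main(void)
{
	struct stream s = {
		.open_mutex = PTHREAD_MUTEX_INITIALIZER,
		.max_bytes = 1 << 20,
	};

	return prealloc_write(&s, 64 * 1024) ? EXIT_FAILURE : EXIT_SUCCESS;
}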
--- /dev/null
+From foo@baz Tue May 10 01:32:04 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Fri, 6 May 2022 12:10:11 +0300
+Subject: ALSA: pcm: Fix races among concurrent prepare and hw_params/hw_free calls
+To: stable@vger.kernel.org
+Cc: tiwai@suse.de, perex@perex.cz, kirin.say@gmail.com
+Message-ID: <20220506091013.1746159-4-ovidiu.panait@windriver.com>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 3c3201f8c7bb77eb53b08a3ca8d9a4ddc500b4c0 upstream.
+
+Like the previous fixes for the hw_params and hw_free ioctl races,
+we need to cover concurrent prepare ioctl calls against hw_params
+and hw_free, too.
+
+This patch implements the locking for the prepare ioctl with the
+existing runtime->buffer_mutex. Unlike the previous case of
+snd_pcm_hw_params() and snd_pcm_hw_free(), snd_pcm_prepare() is
+performed on the linked streams, hence the lock can't simply be
+applied at the top level. To take the lock for each linked
+substream, we modify snd_pcm_action_group() slightly and apply the
+buffer_mutex in the stream_lock=false case (where formerly no lock
+was applied at all); see the sketch after this patch.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Link: https://lore.kernel.org/r/20220322170720.3529-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+[OP: backport to 5.4: adjusted context]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/pcm_native.c | 32 ++++++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 14 deletions(-)
+
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1042,15 +1042,17 @@ struct action_ops {
+ */
+ static int snd_pcm_action_group(const struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+- int state, int do_lock)
++ int state, int stream_lock)
+ {
+ struct snd_pcm_substream *s = NULL;
+ struct snd_pcm_substream *s1;
+ int res = 0, depth = 1;
+
+ snd_pcm_group_for_each_entry(s, substream) {
+- if (do_lock && s != substream) {
+- if (s->pcm->nonatomic)
++ if (s != substream) {
++ if (!stream_lock)
++ mutex_lock_nested(&s->runtime->buffer_mutex, depth);
++ else if (s->pcm->nonatomic)
+ mutex_lock_nested(&s->self_group.mutex, depth);
+ else
+ spin_lock_nested(&s->self_group.lock, depth);
+@@ -1078,18 +1080,18 @@ static int snd_pcm_action_group(const st
+ ops->post_action(s, state);
+ }
+ _unlock:
+- if (do_lock) {
+- /* unlock streams */
+- snd_pcm_group_for_each_entry(s1, substream) {
+- if (s1 != substream) {
+- if (s1->pcm->nonatomic)
+- mutex_unlock(&s1->self_group.mutex);
+- else
+- spin_unlock(&s1->self_group.lock);
+- }
+- if (s1 == s) /* end */
+- break;
++ /* unlock streams */
++ snd_pcm_group_for_each_entry(s1, substream) {
++ if (s1 != substream) {
++ if (!stream_lock)
++ mutex_unlock(&s1->runtime->buffer_mutex);
++ else if (s1->pcm->nonatomic)
++ mutex_unlock(&s1->self_group.mutex);
++ else
++ spin_unlock(&s1->self_group.lock);
+ }
++ if (s1 == s) /* end */
++ break;
+ }
+ return res;
+ }
+@@ -1219,10 +1221,12 @@ static int snd_pcm_action_nonatomic(cons
+
+ /* Guarantee the group members won't change during non-atomic action */
+ down_read(&snd_pcm_link_rwsem);
++ mutex_lock(&substream->runtime->buffer_mutex);
+ if (snd_pcm_stream_linked(substream))
+ res = snd_pcm_action_group(ops, substream, state, 0);
+ else
+ res = snd_pcm_action_single(ops, substream, state);
++ mutex_unlock(&substream->runtime->buffer_mutex);
+ up_read(&snd_pcm_link_rwsem);
+ return res;
+ }
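A rough userspace sketch of the group walk in the reworked
snd_pcm_action_group() (an array instead of the kernel's group list and
plain pthread mutexes instead of lockdep-annotated mutex_lock_nested(),
so this is a simplification, not the kernel algorithm): every member is
locked before the action runs and unlocked afterwards by walking the
group again.

/* Sketch: lock every member of a linked group, act, then unlock them all. */
#include <pthread.h>
#include <stdio.h>

#define GROUP_SIZE 3

struct substream {
	pthread_mutex_t buffer_mutex;
	int state;
};

static int do_action(struct substream *s, int state)
{
	s->state = state;               /* stand-in for the prepare action */
	return 0;
}

static int action_group(struct substream *group, int n, int state)
{
	int i, res = 0;

	/* lock all members first; the kernel passes an increasing depth to
	 * mutex_lock_nested() so that lockdep accepts the chain */
	for (i = 0; i < n; i++)
		pthread_mutex_lock(&group[i].buffer_mutex);

	for (i = 0; i < n && res == 0; i++)
		res = do_action(&group[i], state);

	/* unlock by walking the group again, as the kernel does */
	for (i = 0; i < n; i++)
		pthread_mutex_unlock(&group[i].buffer_mutex);

	return res;
}

int main(void)
{
	struct substream group[GROUP_SIZE] = {
		[0].buffer_mutex = PTHREAD_MUTEX_INITIALIZER,
		[1].buffer_mutex = PTHREAD_MUTEX_INITIALIZER,
		[2].buffer_mutex = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("group action result: %d\n", action_group(group, GROUP_SIZE, 1));
	return 0;
}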
--- /dev/null
+From foo@baz Tue May 10 01:32:04 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Fri, 6 May 2022 12:10:10 +0300
+Subject: ALSA: pcm: Fix races among concurrent read/write and buffer changes
+To: stable@vger.kernel.org
+Cc: tiwai@suse.de, perex@perex.cz, kirin.say@gmail.com
+Message-ID: <20220506091013.1746159-3-ovidiu.panait@windriver.com>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit dca947d4d26dbf925a64a6cfb2ddbc035e831a3d upstream.
+
+In the current PCM design, the read/write syscalls (as well as the
+equivalent ioctls) are allowed before the PCM stream is running,
+that is, in the PCM PREPARED state. Meanwhile, we also allow the
+hw_params and hw_free ioctls to be re-issued in the PREPARED state,
+which may change or free the buffers. The problem is that there is
+no protection against those mix-ups.
+
+This patch applies the previously introduced runtime->buffer_mutex
+to the read/write operations so that a concurrent hw_params or
+hw_free call can no longer interfere during the operation. The
+mutex is unlocked before scheduling, so that we don't hold it for
+too long; see the sketch after this patch.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Jaroslav Kysela <perex@perex.cz>
+Link: https://lore.kernel.org/r/20220322170720.3529-3-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/pcm_lib.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1861,9 +1861,11 @@ static int wait_for_avail(struct snd_pcm
+ if (avail >= runtime->twake)
+ break;
+ snd_pcm_stream_unlock_irq(substream);
++ mutex_unlock(&runtime->buffer_mutex);
+
+ tout = schedule_timeout(wait_time);
+
++ mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ set_current_state(TASK_INTERRUPTIBLE);
+ switch (runtime->status->state) {
+@@ -2157,6 +2159,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(str
+
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+
++ mutex_lock(&runtime->buffer_mutex);
+ snd_pcm_stream_lock_irq(substream);
+ err = pcm_accessible_state(runtime);
+ if (err < 0)
+@@ -2244,6 +2247,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(str
+ if (xfer > 0 && err >= 0)
+ snd_pcm_update_state(substream, runtime);
+ snd_pcm_stream_unlock_irq(substream);
++ mutex_unlock(&runtime->buffer_mutex);
+ return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
+ }
+ EXPORT_SYMBOL(__snd_pcm_lib_xfer);
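The wait_for_avail() change above follows the classic
unlock-sleep-relock-revalidate shape. A small userspace sketch of that
loop (illustrative names; usleep() stands in for schedule_timeout(),
and -EBADFD is the Linux-specific errno the PCM code uses):

/* Sketch: drop a lock around a blocking wait, then retake and recheck. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

struct runtime {
	pthread_mutex_t buffer_mutex;
	size_t avail;
	size_t wanted;
	bool running;
};

static int wait_for_avail(struct runtime *rt)
{
	int err = 0;

	pthread_mutex_lock(&rt->buffer_mutex);
	while (rt->avail < rt->wanted) {
		/* don't hold the lock while sleeping: hw_params may need it */
		pthread_mutex_unlock(&rt->buffer_mutex);
		usleep(10 * 1000);          /* stands in for schedule_timeout() */
		pthread_mutex_lock(&rt->buffer_mutex);

		/* the state may have changed while we slept: check it again */
		if (!rt->running) {
			err = -EBADFD;
			break;
		}
	}
	pthread_mutex_unlock(&rt->buffer_mutex);
	return err;
}

int main(void)
{
	struct runtime rt = {
		.buffer_mutex = PTHREAD_MUTEX_INITIALIZER,
		.wanted = 4096,
		.running = false,           /* no producer in this sketch: bail out */
	};

	return wait_for_avail(&rt) == -EBADFD ? 0 : 1;
}

Because the mutex is dropped for the whole sleep, a concurrent
hw_params or hw_free only ever waits for the short locked sections
around the condition checks, not for a reader blocked on free space.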
net-ipv6-ensure-we-call-ipv6_mc_down-at-most-once.patch
block-map-add-__gfp_zero-flag-for-alloc_page-in-function-bio_copy_kern.patch
mm-fix-unexpected-zeroed-page-mapping-with-zram-swap.patch
+alsa-pcm-fix-races-among-concurrent-hw_params-and-hw_free-calls.patch
+alsa-pcm-fix-races-among-concurrent-read-write-and-buffer-changes.patch
+alsa-pcm-fix-races-among-concurrent-prepare-and-hw_params-hw_free-calls.patch
+alsa-pcm-fix-races-among-concurrent-prealloc-proc-writes.patch
+alsa-pcm-fix-potential-ab-ba-lock-with-buffer_mutex-and-mmap_lock.patch
+tcp-make-sure-treq-af_specific-is-initialized.patch
--- /dev/null
+From ba5a4fdd63ae0c575707030db0b634b160baddd7 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 24 Apr 2022 13:35:09 -0700
+Subject: tcp: make sure treq->af_specific is initialized
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit ba5a4fdd63ae0c575707030db0b634b160baddd7 upstream.
+
+syzbot complained about a recent change in the TCP stack,
+hitting a NULL pointer dereference [1].
+
+TCP request sockets have an af_specific pointer, which, before the
+blamed change, was used only for SYNACK generation in non-SYNCOOKIE
+mode.
+
+TCP request sockets momentarily created when the third packet comes
+from the client in SYNCOOKIE mode were not using treq->af_specific.
+
+Make sure this field is populated, in the same way normal TCP
+request sockets do in tcp_conn_request().
+
+[1]
+TCP: request_sock_TCPv6: Possible SYN flooding on port 20002. Sending cookies. Check SNMP counters.
+general protection fault, probably for non-canonical address 0xdffffc0000000001: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000008-0x000000000000000f]
+CPU: 1 PID: 3695 Comm: syz-executor864 Not tainted 5.18.0-rc3-syzkaller-00224-g5fd1fe4807f9 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:tcp_create_openreq_child+0xe16/0x16b0 net/ipv4/tcp_minisocks.c:534
+Code: 48 c1 ea 03 80 3c 02 00 0f 85 e5 07 00 00 4c 8b b3 28 01 00 00 48 b8 00 00 00 00 00 fc ff df 49 8d 7e 08 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 c9 07 00 00 48 8b 3c 24 48 89 de 41 ff 56 08 48
+RSP: 0018:ffffc90000de0588 EFLAGS: 00010202
+RAX: dffffc0000000000 RBX: ffff888076490330 RCX: 0000000000000100
+RDX: 0000000000000001 RSI: ffffffff87d67ff0 RDI: 0000000000000008
+RBP: ffff88806ee1c7f8 R08: 0000000000000000 R09: 0000000000000000
+R10: ffffffff87d67f00 R11: 0000000000000000 R12: ffff88806ee1bfc0
+R13: ffff88801b0e0368 R14: 0000000000000000 R15: 0000000000000000
+FS: 00007f517fe58700(0000) GS:ffff8880b9d00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007ffcead76960 CR3: 000000006f97b000 CR4: 00000000003506e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <IRQ>
+ tcp_v6_syn_recv_sock+0x199/0x23b0 net/ipv6/tcp_ipv6.c:1267
+ tcp_get_cookie_sock+0xc9/0x850 net/ipv4/syncookies.c:207
+ cookie_v6_check+0x15c3/0x2340 net/ipv6/syncookies.c:258
+ tcp_v6_cookie_check net/ipv6/tcp_ipv6.c:1131 [inline]
+ tcp_v6_do_rcv+0x1148/0x13b0 net/ipv6/tcp_ipv6.c:1486
+ tcp_v6_rcv+0x3305/0x3840 net/ipv6/tcp_ipv6.c:1725
+ ip6_protocol_deliver_rcu+0x2e9/0x1900 net/ipv6/ip6_input.c:422
+ ip6_input_finish+0x14c/0x2c0 net/ipv6/ip6_input.c:464
+ NF_HOOK include/linux/netfilter.h:307 [inline]
+ NF_HOOK include/linux/netfilter.h:301 [inline]
+ ip6_input+0x9c/0xd0 net/ipv6/ip6_input.c:473
+ dst_input include/net/dst.h:461 [inline]
+ ip6_rcv_finish net/ipv6/ip6_input.c:76 [inline]
+ NF_HOOK include/linux/netfilter.h:307 [inline]
+ NF_HOOK include/linux/netfilter.h:301 [inline]
+ ipv6_rcv+0x27f/0x3b0 net/ipv6/ip6_input.c:297
+ __netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5405
+ __netif_receive_skb+0x24/0x1b0 net/core/dev.c:5519
+ process_backlog+0x3a0/0x7c0 net/core/dev.c:5847
+ __napi_poll+0xb3/0x6e0 net/core/dev.c:6413
+ napi_poll net/core/dev.c:6480 [inline]
+ net_rx_action+0x8ec/0xc60 net/core/dev.c:6567
+ __do_softirq+0x29b/0x9c2 kernel/softirq.c:558
+ invoke_softirq kernel/softirq.c:432 [inline]
+ __irq_exit_rcu+0x123/0x180 kernel/softirq.c:637
+ irq_exit_rcu+0x5/0x20 kernel/softirq.c:649
+ sysvec_apic_timer_interrupt+0x93/0xc0 arch/x86/kernel/apic/apic.c:1097
+
+Fixes: 5b0b9e4c2c89 ("tcp: md5: incorrect tcp_header_len for incoming connections")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Francesco Ruggeri <fruggeri@arista.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[fruggeri: Account for backport conflicts from 35b2c3211609 and 6fc8c827dd4f]
+Signed-off-by: Francesco Ruggeri <fruggeri@arista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tcp.h | 5 +++++
+ net/ipv4/syncookies.c | 1 +
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv6/syncookies.c | 1 +
+ net/ipv6/tcp_ipv6.c | 2 +-
+ 5 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2015,6 +2015,11 @@ struct tcp_request_sock_ops {
+ enum tcp_synack_type synack_type);
+ };
+
++extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
++#if IS_ENABLED(CONFIG_IPV6)
++extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
++#endif
++
+ #ifdef CONFIG_SYN_COOKIES
+ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+ const struct sock *sk, struct sk_buff *skb,
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
++ treq->af_specific = &tcp_request_sock_ipv4_ops;
+ treq->rcv_isn = ntohl(th->seq) - 1;
+ treq->snt_isn = cookie;
+ treq->ts_off = 0;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1383,7 +1383,7 @@ struct request_sock_ops tcp_request_sock
+ .syn_ack_timeout = tcp_syn_ack_timeout,
+ };
+
+-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
++const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+ .mss_clamp = TCP_MSS_DEFAULT,
+ #ifdef CONFIG_TCP_MD5SIG
+ .req_md5_lookup = tcp_v4_md5_lookup,
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -176,6 +176,7 @@ struct sock *cookie_v6_check(struct sock
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
++ treq->af_specific = &tcp_request_sock_ipv6_ops;
+ treq->tfo_listener = false;
+
+ if (security_inet_conn_request(sk, skb, req))
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -800,7 +800,7 @@ struct request_sock_ops tcp6_request_soc
+ .syn_ack_timeout = tcp_syn_ack_timeout,
+ };
+
+-static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
++const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+ .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
+ sizeof(struct ipv6hdr),
+ #ifdef CONFIG_TCP_MD5SIG