--- /dev/null
+From fb294b1c0ba982144ca467a75e7d01ff26304e2b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 6 Mar 2023 11:17:58 -0500
+Subject: dm crypt: add cond_resched() to dmcrypt_write()
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit fb294b1c0ba982144ca467a75e7d01ff26304e2b upstream.
+
+The loop in dmcrypt_write may run for an unbounded amount of time, so
+we need a cond_resched() in it.
+
+This commit fixes the following warning:
+
+[ 3391.153255][ C12] watchdog: BUG: soft lockup - CPU#12 stuck for 23s! [dmcrypt_write/2:2897]
+...
+[ 3391.387210][ C12] Call trace:
+[ 3391.390338][ C12] blk_attempt_bio_merge.part.6+0x38/0x158
+[ 3391.395970][ C12] blk_attempt_plug_merge+0xc0/0x1b0
+[ 3391.401085][ C12] blk_mq_submit_bio+0x398/0x550
+[ 3391.405856][ C12] submit_bio_noacct+0x308/0x380
+[ 3391.410630][ C12] dmcrypt_write+0x1e4/0x208 [dm_crypt]
+[ 3391.416005][ C12] kthread+0x130/0x138
+[ 3391.419911][ C12] ret_from_fork+0x10/0x18
+
+Reported-by: yangerkun <yangerkun@huawei.com>
+Fixes: dc2676210c42 ("dm crypt: offload writes to thread")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-crypt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1661,6 +1661,7 @@ pop_from_list:
+ io = crypt_io_from_node(rb_first(&write_tree));
+ rb_erase(&io->rb_node, &write_tree);
+ kcryptd_io_write(io);
++ cond_resched();
+ } while (!RB_EMPTY_ROOT(&write_tree));
+ blk_finish_plug(&plug);
+ }
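
The fix follows the standard pattern for kthread work loops: any loop
that can walk a large backlog without ever sleeping should yield the
CPU explicitly, otherwise a non-preemptible kernel hits the soft lockup
watchdog shown above. A minimal sketch of that pattern (hypothetical
work_item, backlog and submit_item names, not the dm-crypt code itself):

    while (!list_empty(&backlog)) {
        struct work_item *w = list_first_entry(&backlog,
                                               struct work_item, list);

        list_del(&w->list);
        submit_item(w);     /* fast, never sleeps */
        cond_resched();     /* let other tasks run between items */
    }
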
--- /dev/null
+From d3aa3e060c4a80827eb801fc448debc9daa7c46b Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Thu, 16 Mar 2023 14:55:06 +0800
+Subject: dm stats: check for and propagate alloc_percpu failure
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+commit d3aa3e060c4a80827eb801fc448debc9daa7c46b upstream.
+
+Check alloc_percpu()'s return value and return an error from
+dm_stats_init() if it fails. Update alloc_dev() to fail if
+dm_stats_init() does.
+
+Otherwise, a NULL pointer dereference will occur in dm_stats_cleanup()
+even if dm-stats isn't being actively used.
+
+Fixes: fd2ed4d25270 ("dm: add statistics support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-stats.c | 7 ++++++-
+ drivers/md/dm-stats.h | 2 +-
+ drivers/md/dm.c | 4 +++-
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_s
+ atomic_read(&shared->in_flight[WRITE]);
+ }
+
+-void dm_stats_init(struct dm_stats *stats)
++int dm_stats_init(struct dm_stats *stats)
+ {
+ int cpu;
+ struct dm_stats_last_position *last;
+@@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stat
+ mutex_init(&stats->mutex);
+ INIT_LIST_HEAD(&stats->list);
+ stats->last = alloc_percpu(struct dm_stats_last_position);
++ if (!stats->last)
++ return -ENOMEM;
++
+ for_each_possible_cpu(cpu) {
+ last = per_cpu_ptr(stats->last, cpu);
+ last->last_sector = (sector_t)ULLONG_MAX;
+ last->last_rw = UINT_MAX;
+ }
++
++ return 0;
+ }
+
+ void dm_stats_cleanup(struct dm_stats *stats)
+--- a/drivers/md/dm-stats.h
++++ b/drivers/md/dm-stats.h
+@@ -22,7 +22,7 @@ struct dm_stats_aux {
+ unsigned long long duration_ns;
+ };
+
+-void dm_stats_init(struct dm_stats *st);
++int dm_stats_init(struct dm_stats *st);
+ void dm_stats_cleanup(struct dm_stats *st);
+
+ struct mapped_device;
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2021,7 +2021,9 @@ static struct mapped_device *alloc_dev(i
+ bio_set_dev(&md->flush_bio, md->bdev);
+ md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+
+- dm_stats_init(&md->stats);
++ r = dm_stats_init(&md->stats);
++ if (r < 0)
++ goto bad;
+
+ /* Populate the mapping, nobody knows we exist yet */
+ spin_lock(&_minor_lock);
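
The change is the usual error-propagation pattern for per-CPU
allocations: alloc_percpu() returns NULL on failure, and an init helper
has to report that instead of leaving a NULL pointer for a later user
to dereference. A minimal sketch of the pattern, using a hypothetical
foo_stats type:

    struct foo_stats { u64 count; };
    struct foo { struct foo_stats __percpu *stats; };

    static int foo_stats_init(struct foo *foo)
    {
        foo->stats = alloc_percpu(struct foo_stats);
        if (!foo->stats)
            return -ENOMEM;    /* caller must check and unwind */
        return 0;
    }
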
--- /dev/null
+From 92fbb6d1296f81f41f65effd7f5f8c0f74943d15 Mon Sep 17 00:00:00 2001
+From: Wei Chen <harperchen1110@gmail.com>
+Date: Tue, 14 Mar 2023 16:54:21 +0000
+Subject: i2c: xgene-slimpro: Fix out-of-bounds bug in xgene_slimpro_i2c_xfer()
+
+From: Wei Chen <harperchen1110@gmail.com>
+
+commit 92fbb6d1296f81f41f65effd7f5f8c0f74943d15 upstream.
+
+The data->block[0] variable comes from userspace and is a number between
+0 and 255. Without a proper check, the variable may be large enough to
+cause an out-of-bounds write when performing the memcpy() in
+slimpro_i2c_blkwr().
+
+Fix this bug by checking the value of writelen.
+
+Fixes: f6505fbabc42 ("i2c: add SLIMpro I2C device driver on APM X-Gene platform")
+Signed-off-by: Wei Chen <harperchen1110@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-xgene-slimpro.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -321,6 +321,9 @@ static int slimpro_i2c_blkwr(struct slim
+ u32 msg[3];
+ int rc;
+
++ if (writelen > I2C_SMBUS_BLOCK_MAX)
++ return -EINVAL;
++
+ memcpy(ctx->dma_buffer, data, writelen);
+ paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
+ DMA_TO_DEVICE);
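
The rule applied here is generic: a length that originates in userspace
must be validated against the destination buffer before it reaches
memcpy(). A minimal sketch with hypothetical buf/len/data names:

    /* len comes from userspace; buf is a fixed-size bounce buffer */
    if (len > sizeof(buf))
        return -EINVAL;
    memcpy(buf, data, len);

In slimpro_i2c_blkwr() the destination is ctx->dma_buffer, sized for an
SMBus block transfer, hence the I2C_SMBUS_BLOCK_MAX limit used above.
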
--- /dev/null
+From a53ce18cacb477dd0513c607f187d16f0fa96f71 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Fri, 17 Mar 2023 17:08:10 +0100
+Subject: sched/fair: Sanitize vruntime of entity being migrated
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+commit a53ce18cacb477dd0513c607f187d16f0fa96f71 upstream.
+
+Commit 829c1651e9c4 ("sched/fair: sanitize vruntime of entity being placed")
+fixes an overflow bug, but ignores the case where se->exec_start is reset
+after a migration.
+
+To fix this case, delay the reset of se->exec_start until after placing
+the entity, because the placement code uses se->exec_start to detect a
+long-sleeping task.
+
+In order to take into account a possible divergence between the
+clock_task values of two rqs, increase the threshold to around 104 days.
+
+Fixes: 829c1651e9c4 ("sched/fair: sanitize vruntime of entity being placed")
+Originally-by: Zhang Qiao <zhangqiao22@huawei.com>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Zhang Qiao <zhangqiao22@huawei.com>
+Link: https://lore.kernel.org/r/20230317160810.107988-1-vincent.guittot@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 3 ++
+ kernel/sched/fair.c | 53 ++++++++++++++++++++++++++++++++++++++++++----------
+ 2 files changed, 46 insertions(+), 10 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -741,6 +741,9 @@ static inline void dequeue_task(struct r
+
+ void activate_task(struct rq *rq, struct task_struct *p, int flags)
+ {
++ if (task_on_rq_migrating(p))
++ flags |= ENQUEUE_MIGRATED;
++
+ if (task_contributes_to_load(p))
+ rq->nr_uninterruptible--;
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3854,11 +3854,33 @@ static void check_spread(struct cfs_rq *
+ #endif
+ }
+
++static inline bool entity_is_long_sleeper(struct sched_entity *se)
++{
++ struct cfs_rq *cfs_rq;
++ u64 sleep_time;
++
++ if (se->exec_start == 0)
++ return false;
++
++ cfs_rq = cfs_rq_of(se);
++
++ sleep_time = rq_clock_task(rq_of(cfs_rq));
++
++ /* Happen while migrating because of clock task divergence */
++ if (sleep_time <= se->exec_start)
++ return false;
++
++ sleep_time -= se->exec_start;
++ if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
++ return true;
++
++ return false;
++}
++
+ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ {
+ u64 vruntime = cfs_rq->min_vruntime;
+- u64 sleep_time;
+
+ /*
+ * The 'current' period is already promised to the current tasks,
+@@ -3885,13 +3907,24 @@ place_entity(struct cfs_rq *cfs_rq, stru
+
+ /*
+ * Pull vruntime of the entity being placed to the base level of
+- * cfs_rq, to prevent boosting it if placed backwards. If the entity
+- * slept for a long time, don't even try to compare its vruntime with
+- * the base as it may be too far off and the comparison may get
+- * inversed due to s64 overflow.
++ * cfs_rq, to prevent boosting it if placed backwards.
++ * However, min_vruntime can advance much faster than real time, with
++ * the extreme being when an entity with the minimal weight always runs
++ * on the cfs_rq. If the waking entity slept for a long time, its
++ * vruntime difference from min_vruntime may overflow s64 and their
++ * comparison may get inversed, so ignore the entity's original
++ * vruntime in that case.
++ * The maximal vruntime speedup is given by the ratio of normal to
++ * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
++ * When placing a migrated waking entity, its exec_start has been set
++ * from a different rq. In order to take into account a possible
++ * divergence between new and prev rq's clocks task because of irq and
++ * stolen time, we take an additional margin.
++ * So, cutting off on the sleep time of
++ * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
++ * should be safe.
+ */
+- sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
+- if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
++ if (entity_is_long_sleeper(se))
+ se->vruntime = vruntime;
+ else
+ se->vruntime = max_vruntime(se->vruntime, vruntime);
+@@ -3989,6 +4022,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
+
+ if (flags & ENQUEUE_WAKEUP)
+ place_entity(cfs_rq, se, 0);
++ /* Entity has migrated, no longer consider this task hot */
++ if (flags & ENQUEUE_MIGRATED)
++ se->exec_start = 0;
+
+ check_schedstat_required();
+ update_stats_enqueue(cfs_rq, se, flags);
+@@ -6555,9 +6591,6 @@ static void migrate_task_rq_fair(struct
+ /* Tell new CPU we are migrated */
+ p->se.avg.last_update_time = 0;
+
+- /* We have migrated, no longer consider this task hot */
+- p->se.exec_start = 0;
+-
+ update_scan_period(p, new_cpu);
+ }
+
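
The 104-day figure follows directly from the cutoff used in
entity_is_long_sleeper(), with scale_load_down(NICE_0_LOAD) equal to
1024, the nice-0 weight:

    2^63 / 1024 ns = 2^53 ns
                  ~= 9.0 * 10^15 ns
                  ~= 9.0 * 10^6 s
                  ~= 104 days

Sleeps below that bound cannot let min_vruntime run ahead of the
sleeper's vruntime by 2^63 or more, even at the maximal speedup of
scale_load_down(NICE_0_LOAD) / MIN_SHARES, so the s64 comparison in
max_vruntime() stays valid.
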
--- /dev/null
+From 829c1651e9c4a6f78398d3e67651cef9bb6b42cc Mon Sep 17 00:00:00 2001
+From: Zhang Qiao <zhangqiao22@huawei.com>
+Date: Mon, 30 Jan 2023 13:22:16 +0100
+Subject: sched/fair: sanitize vruntime of entity being placed
+
+From: Zhang Qiao <zhangqiao22@huawei.com>
+
+commit 829c1651e9c4a6f78398d3e67651cef9bb6b42cc upstream.
+
+When a scheduling entity is placed onto cfs_rq, its vruntime is pulled
+to the base level (around cfs_rq->min_vruntime), so that the entity
+doesn't gain extra boost when placed backwards.
+
+However, if the entity being placed hasn't executed for a long time, its
+vruntime may fall too far behind (e.g. while cfs_rq was executing a
+low-weight hog), which can invert the vruntime comparison due to s64
+overflow. This results in the entity being placed with its original
+vruntime way forwards, so that it will effectively never get to the cpu.
+
+To prevent that, ignore the vruntime of the entity being placed if it
+hasn't executed for much longer than the characteristic scheduler time
+scale.
+
+[rkagan: formatted, adjusted commit log, comments, cutoff value]
+Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
+Co-developed-by: Roman Kagan <rkagan@amazon.de>
+Signed-off-by: Roman Kagan <rkagan@amazon.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20230130122216.3555094-1-rkagan@amazon.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3858,6 +3858,7 @@ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ {
+ u64 vruntime = cfs_rq->min_vruntime;
++ u64 sleep_time;
+
+ /*
+ * The 'current' period is already promised to the current tasks,
+@@ -3882,8 +3883,18 @@ place_entity(struct cfs_rq *cfs_rq, stru
+ vruntime -= thresh;
+ }
+
+- /* ensure we never gain time by being placed backwards. */
+- se->vruntime = max_vruntime(se->vruntime, vruntime);
++ /*
++ * Pull vruntime of the entity being placed to the base level of
++ * cfs_rq, to prevent boosting it if placed backwards. If the entity
++ * slept for a long time, don't even try to compare its vruntime with
++ * the base as it may be too far off and the comparison may get
++ * inversed due to s64 overflow.
++ */
++ sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
++ if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
++ se->vruntime = vruntime;
++ else
++ se->vruntime = max_vruntime(se->vruntime, vruntime);
+ }
+
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
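
The overflow described above is easy to demonstrate in isolation: the
scheduler compares vruntimes through a signed difference of u64 values
(as in max_vruntime() and entity_before()), and that comparison inverts
once the two values drift more than 2^63 apart. A stand-alone
illustration in plain userspace C (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* same comparison scheme as the kernel's entity_before() */
    static int before(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
        uint64_t sleeper = 0;                /* vruntime frozen long ago     */
        uint64_t min_vr  = (1ULL << 63) + 1; /* min_vruntime after the sleep */

        /* gap exceeds 2^63: the sleeper no longer compares as behind */
        printf("%d\n", before(sleeper, min_vr));   /* prints 0 */
        return 0;
    }

An entity placed with such a vruntime sorts after every other task in
the rbtree and is effectively never picked, which is what the 60-second
cutoff above guards against.
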
usb-chipdea-core-fix-return-einval-if-request-role-is-the-same-with-current-role.patch
usb-chipidea-core-fix-possible-concurrent-when-switch-role.patch
nilfs2-fix-kernel-infoleak-in-nilfs_ioctl_wrap_copy.patch
+i2c-xgene-slimpro-fix-out-of-bounds-bug-in-xgene_slimpro_i2c_xfer.patch
+dm-stats-check-for-and-propagate-alloc_percpu-failure.patch
+dm-crypt-add-cond_resched-to-dmcrypt_write.patch
+sched-fair-sanitize-vruntime-of-entity-being-placed.patch
+sched-fair-sanitize-vruntime-of-entity-being-migrated.patch