--- /dev/null
+From e20af389222212d9a523e624e06b0d40fe6bd722 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Oct 2024 10:07:48 +0100
+Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit be0e822bb3f5259c7f9424ba97e8175211288813 ]
+
+blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
+and the last fix to it enabled valid NVMe I/O to pass, but also allowed
+invalid ones for drivers that set a max_segment_size or seg_boundary
+limit.
+
+Fix it once and for all by using the bio_split_rw_at helper from the I/O
+path, which indicates if and where a bio would have to be split to
+adhere to the queue limits, and if it returns a positive value, turn that
+into -EREMOTEIO to retry using the copy path.
+
+Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-map.c | 56 +++++++++++++++----------------------------------
+ 1 file changed, 17 insertions(+), 39 deletions(-)
+
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 6ef2ec1f7d78b..b5fd1d8574615 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
+ /* Prepare bio for passthrough IO given ITER_BVEC iter */
+ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+ {
+- struct request_queue *q = rq->q;
+- size_t nr_iter = iov_iter_count(iter);
+- size_t nr_segs = iter->nr_segs;
+- struct bio_vec *bvecs, *bvprvp = NULL;
+- const struct queue_limits *lim = &q->limits;
+- unsigned int nsegs = 0, bytes = 0;
++ const struct queue_limits *lim = &rq->q->limits;
++ unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
++ unsigned int nsegs;
+ struct bio *bio;
+- size_t i;
++ int ret;
+
+- if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+- return -EINVAL;
+- if (nr_segs > queue_max_segments(q))
++ if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
+ return -EINVAL;
+
+- /* no iovecs to alloc, as we already have a BVEC iterator */
++ /* reuse the bvecs from the iterator instead of allocating new ones */
+ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+- if (bio == NULL)
++ if (!bio)
+ return -ENOMEM;
+-
+ bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+- blk_rq_bio_prep(rq, bio, nr_segs);
+-
+- /* loop to perform a bunch of sanity checks */
+- bvecs = (struct bio_vec *)iter->bvec;
+- for (i = 0; i < nr_segs; i++) {
+- struct bio_vec *bv = &bvecs[i];
+-
+- /*
+- * If the queue doesn't support SG gaps and adding this
+- * offset would create a gap, fallback to copy.
+- */
+- if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+- blk_mq_map_bio_put(bio);
+- return -EREMOTEIO;
+- }
+- /* check full condition */
+- if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+- goto put_bio;
+- if (bytes + bv->bv_len > nr_iter)
+- break;
+
+- nsegs++;
+- bytes += bv->bv_len;
+- bvprvp = bv;
++ /* check that the data layout matches the hardware restrictions */
++ ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
++ if (ret) {
++ /* if we would have to split the bio, copy instead */
++ if (ret > 0)
++ ret = -EREMOTEIO;
++ blk_mq_map_bio_put(bio);
++ return ret;
+ }
++
++ blk_rq_bio_prep(rq, bio, nsegs);
+ return 0;
+-put_bio:
+- blk_mq_map_bio_put(bio);
+- return -EINVAL;
+ }
+
+ /**
+--
+2.43.0
+
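To make the new calling convention concrete: bio_split_rw_at() reports a negative errno, zero, or a positive split offset, and the mapping code only has to translate "would need a split" into -EREMOTEIO so the caller falls back to the copy path. Below is a minimal userspace C sketch of that contract; check_limits(), map_bvec() and the fixed byte limits are illustrative stand-ins, not kernel API.

#include <errno.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#ifndef EREMOTEIO
#define EREMOTEIO 121
#endif

/*
 * Stand-in for bio_split_rw_at(): < 0 if the request can never fit,
 * 0 if it fits the limits as-is, or a positive sector offset at which
 * it would have to be split.
 */
static int check_limits(unsigned int bytes, unsigned int max_bytes)
{
	if (bytes == 0)
		return -EINVAL;
	if (bytes <= max_bytes)
		return 0;
	return (int)(max_bytes >> SECTOR_SHIFT);	/* split point in sectors */
}

/* Mirrors the error handling in blk_rq_map_user_bvec() after the patch. */
static int map_bvec(unsigned int bytes, unsigned int max_bytes)
{
	int ret = check_limits(bytes, max_bytes);

	if (ret > 0)
		ret = -EREMOTEIO;	/* would need a split: fall back to copying */
	return ret;			/* 0 on success, negative errno otherwise */
}

int main(void)
{
	printf("fits:    %d\n", map_bvec(4096, 65536));	/* 0 */
	printf("too big: %d\n", map_bvec(131072, 65536));	/* -EREMOTEIO */
	printf("empty:   %d\n", map_bvec(0, 65536));		/* -EINVAL */
	return 0;
}
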
--- /dev/null
+From 939f3b8ee2a48fafbc26541789861553268153fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Aug 2024 19:37:54 +0200
+Subject: block: rework bio splitting
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit b35243a447b9fe6457fa8e1352152b818436ba5a ]
+
+The current setup with bio_may_exceed_limits and __bio_split_to_limits
+is a bit of a mess.
+
+Change it so that __bio_split_to_limits does all the work and is just
+a variant of bio_split_to_limits that returns nr_segs. This is done
+by inlining it and instead having the various bio_split_* helpers directly
+submit the potentially split bios.
+
+To support btrfs, the rw version has a lower level helper split out
+that just returns the offset to split. This turns out to nicely clean
+up the btrfs flow as well.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: David Sterba <dsterba@suse.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
+Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
+Link: https://lore.kernel.org/r/20240826173820.1690925-2-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: be0e822bb3f5 ("block: fix queue limits checks in blk_rq_map_user_bvec for real")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-merge.c | 146 +++++++++++++++++---------------------------
+ block/blk-mq.c | 11 ++--
+ block/blk.h | 63 +++++++++++++------
+ fs/btrfs/bio.c | 30 +++++----
+ include/linux/bio.h | 4 +-
+ 5 files changed, 125 insertions(+), 129 deletions(-)
+
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index de5281bcadc53..c7222c4685e06 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -105,9 +105,33 @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
+ return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
+ }
+
+-static struct bio *bio_split_discard(struct bio *bio,
+- const struct queue_limits *lim,
+- unsigned *nsegs, struct bio_set *bs)
++static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
++{
++ if (unlikely(split_sectors < 0)) {
++ bio->bi_status = errno_to_blk_status(split_sectors);
++ bio_endio(bio);
++ return NULL;
++ }
++
++ if (split_sectors) {
++ struct bio *split;
++
++ split = bio_split(bio, split_sectors, GFP_NOIO,
++ &bio->bi_bdev->bd_disk->bio_split);
++ split->bi_opf |= REQ_NOMERGE;
++ blkcg_bio_issue_init(split);
++ bio_chain(split, bio);
++ trace_block_split(split, bio->bi_iter.bi_sector);
++ WARN_ON_ONCE(bio_zone_write_plugging(bio));
++ submit_bio_noacct(bio);
++ return split;
++ }
++
++ return bio;
++}
++
++struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
++ unsigned *nsegs)
+ {
+ unsigned int max_discard_sectors, granularity;
+ sector_t tmp;
+@@ -121,10 +145,10 @@ static struct bio *bio_split_discard(struct bio *bio,
+ min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
+ max_discard_sectors -= max_discard_sectors % granularity;
+ if (unlikely(!max_discard_sectors))
+- return NULL;
++ return bio;
+
+ if (bio_sectors(bio) <= max_discard_sectors)
+- return NULL;
++ return bio;
+
+ split_sectors = max_discard_sectors;
+
+@@ -139,19 +163,18 @@ static struct bio *bio_split_discard(struct bio *bio,
+ if (split_sectors > tmp)
+ split_sectors -= tmp;
+
+- return bio_split(bio, split_sectors, GFP_NOIO, bs);
++ return bio_submit_split(bio, split_sectors);
+ }
+
+-static struct bio *bio_split_write_zeroes(struct bio *bio,
+- const struct queue_limits *lim,
+- unsigned *nsegs, struct bio_set *bs)
++struct bio *bio_split_write_zeroes(struct bio *bio,
++ const struct queue_limits *lim, unsigned *nsegs)
+ {
+ *nsegs = 0;
+ if (!lim->max_write_zeroes_sectors)
+- return NULL;
++ return bio;
+ if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
+- return NULL;
+- return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
++ return bio;
++ return bio_submit_split(bio, lim->max_write_zeroes_sectors);
+ }
+
+ static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
+@@ -274,27 +297,19 @@ static bool bvec_split_segs(const struct queue_limits *lim,
+ }
+
+ /**
+- * bio_split_rw - split a bio in two bios
++ * bio_split_rw_at - check if and where to split a read/write bio
+ * @bio: [in] bio to be split
+ * @lim: [in] queue limits to split based on
+ * @segs: [out] number of segments in the bio with the first half of the sectors
+- * @bs: [in] bio set to allocate the clone from
+ * @max_bytes: [in] maximum number of bytes per bio
+ *
+- * Clone @bio, update the bi_iter of the clone to represent the first sectors
+- * of @bio and update @bio->bi_iter to represent the remaining sectors. The
+- * following is guaranteed for the cloned bio:
+- * - That it has at most @max_bytes worth of data
+- * - That it has at most queue_max_segments(@q) segments.
+- *
+- * Except for discard requests the cloned bio will point at the bi_io_vec of
+- * the original bio. It is the responsibility of the caller to ensure that the
+- * original bio is not freed before the cloned bio. The caller is also
+- * responsible for ensuring that @bs is only destroyed after processing of the
+- * split bio has finished.
++ * Find out if @bio needs to be split to fit the queue limits in @lim and a
++ * maximum size of @max_bytes. Returns a negative error number if @bio can't be
++ * split, 0 if the bio doesn't have to be split, or a positive sector offset if
++ * @bio needs to be split.
+ */
+-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+- unsigned *segs, struct bio_set *bs, unsigned max_bytes)
++int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
++ unsigned *segs, unsigned max_bytes)
+ {
+ struct bio_vec bv, bvprv, *bvprvp = NULL;
+ struct bvec_iter iter;
+@@ -324,22 +339,17 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ }
+
+ *segs = nsegs;
+- return NULL;
++ return 0;
+ split:
+- if (bio->bi_opf & REQ_ATOMIC) {
+- bio->bi_status = BLK_STS_INVAL;
+- bio_endio(bio);
+- return ERR_PTR(-EINVAL);
+- }
++ if (bio->bi_opf & REQ_ATOMIC)
++ return -EINVAL;
++
+ /*
+ * We can't sanely support splitting for a REQ_NOWAIT bio. End it
+ * with EAGAIN if splitting is required and return an error pointer.
+ */
+- if (bio->bi_opf & REQ_NOWAIT) {
+- bio->bi_status = BLK_STS_AGAIN;
+- bio_endio(bio);
+- return ERR_PTR(-EAGAIN);
+- }
++ if (bio->bi_opf & REQ_NOWAIT)
++ return -EAGAIN;
+
+ *segs = nsegs;
+
+@@ -356,58 +366,16 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ * big IO can be trival, disable iopoll when split needed.
+ */
+ bio_clear_polled(bio);
+- return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
++ return bytes >> SECTOR_SHIFT;
+ }
+-EXPORT_SYMBOL_GPL(bio_split_rw);
++EXPORT_SYMBOL_GPL(bio_split_rw_at);
+
+-/**
+- * __bio_split_to_limits - split a bio to fit the queue limits
+- * @bio: bio to be split
+- * @lim: queue limits to split based on
+- * @nr_segs: returns the number of segments in the returned bio
+- *
+- * Check if @bio needs splitting based on the queue limits, and if so split off
+- * a bio fitting the limits from the beginning of @bio and return it. @bio is
+- * shortened to the remainder and re-submitted.
+- *
+- * The split bio is allocated from @q->bio_split, which is provided by the
+- * block layer.
+- */
+-struct bio *__bio_split_to_limits(struct bio *bio,
+- const struct queue_limits *lim,
+- unsigned int *nr_segs)
++struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
++ unsigned *nr_segs)
+ {
+- struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
+- struct bio *split;
+-
+- switch (bio_op(bio)) {
+- case REQ_OP_DISCARD:
+- case REQ_OP_SECURE_ERASE:
+- split = bio_split_discard(bio, lim, nr_segs, bs);
+- break;
+- case REQ_OP_WRITE_ZEROES:
+- split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
+- break;
+- default:
+- split = bio_split_rw(bio, lim, nr_segs, bs,
+- get_max_io_size(bio, lim) << SECTOR_SHIFT);
+- if (IS_ERR(split))
+- return NULL;
+- break;
+- }
+-
+- if (split) {
+- /* there isn't chance to merge the split bio */
+- split->bi_opf |= REQ_NOMERGE;
+-
+- blkcg_bio_issue_init(split);
+- bio_chain(split, bio);
+- trace_block_split(split, bio->bi_iter.bi_sector);
+- WARN_ON_ONCE(bio_zone_write_plugging(bio));
+- submit_bio_noacct(bio);
+- return split;
+- }
+- return bio;
++ return bio_submit_split(bio,
++ bio_split_rw_at(bio, lim, nr_segs,
++ get_max_io_size(bio, lim) << SECTOR_SHIFT));
+ }
+
+ /**
+@@ -426,9 +394,7 @@ struct bio *bio_split_to_limits(struct bio *bio)
+ const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
+ unsigned int nr_segs;
+
+- if (bio_may_exceed_limits(bio, lim))
+- return __bio_split_to_limits(bio, lim, &nr_segs);
+- return bio;
++ return __bio_split_to_limits(bio, lim, &nr_segs);
+ }
+ EXPORT_SYMBOL(bio_split_to_limits);
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b56a1c0dd1387..a2401e4d8c974 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2939,7 +2939,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ struct blk_plug *plug = current->plug;
+ const int is_sync = op_is_sync(bio->bi_opf);
+ struct blk_mq_hw_ctx *hctx;
+- unsigned int nr_segs = 1;
++ unsigned int nr_segs;
+ struct request *rq;
+ blk_status_t ret;
+
+@@ -2981,11 +2981,10 @@ void blk_mq_submit_bio(struct bio *bio)
+ goto queue_exit;
+ }
+
+- if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+- if (!bio)
+- goto queue_exit;
+- }
++ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++ if (!bio)
++ goto queue_exit;
++
+ if (!bio_integrity_prep(bio))
+ goto queue_exit;
+
+diff --git a/block/blk.h b/block/blk.h
+index e180863f918b1..0d8cd64c12606 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -331,33 +331,58 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ ssize_t part_timeout_store(struct device *, struct device_attribute *,
+ const char *, size_t);
+
+-static inline bool bio_may_exceed_limits(struct bio *bio,
+- const struct queue_limits *lim)
++struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
++ unsigned *nsegs);
++struct bio *bio_split_write_zeroes(struct bio *bio,
++ const struct queue_limits *lim, unsigned *nsegs);
++struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
++ unsigned *nr_segs);
++
++/*
++ * All drivers must accept single-segments bios that are smaller than PAGE_SIZE.
++ *
++ * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
++ * always valid if a bio has data. The check might lead to occasional false
++ * positives when bios are cloned, but compared to the performance impact of
++ * cloned bios themselves the loop below doesn't matter anyway.
++ */
++static inline bool bio_may_need_split(struct bio *bio,
++ const struct queue_limits *lim)
++{
++ return lim->chunk_sectors || bio->bi_vcnt != 1 ||
++ bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
++}
++
++/**
++ * __bio_split_to_limits - split a bio to fit the queue limits
++ * @bio: bio to be split
++ * @lim: queue limits to split based on
++ * @nr_segs: returns the number of segments in the returned bio
++ *
++ * Check if @bio needs splitting based on the queue limits, and if so split off
++ * a bio fitting the limits from the beginning of @bio and return it. @bio is
++ * shortened to the remainder and re-submitted.
++ *
++ * The split bio is allocated from @q->bio_split, which is provided by the
++ * block layer.
++ */
++static inline struct bio *__bio_split_to_limits(struct bio *bio,
++ const struct queue_limits *lim, unsigned int *nr_segs)
+ {
+ switch (bio_op(bio)) {
++ default:
++ if (bio_may_need_split(bio, lim))
++ return bio_split_rw(bio, lim, nr_segs);
++ *nr_segs = 1;
++ return bio;
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
++ return bio_split_discard(bio, lim, nr_segs);
+ case REQ_OP_WRITE_ZEROES:
+- return true; /* non-trivial splitting decisions */
+- default:
+- break;
++ return bio_split_write_zeroes(bio, lim, nr_segs);
+ }
+-
+- /*
+- * All drivers must accept single-segments bios that are <= PAGE_SIZE.
+- * This is a quick and dirty check that relies on the fact that
+- * bi_io_vec[0] is always valid if a bio has data. The check might
+- * lead to occasional false negatives when bios are cloned, but compared
+- * to the performance impact of cloned bios themselves the loop below
+- * doesn't matter anyway.
+- */
+- return lim->chunk_sectors || bio->bi_vcnt != 1 ||
+- bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+ }
+
+-struct bio *__bio_split_to_limits(struct bio *bio,
+- const struct queue_limits *lim,
+- unsigned int *nr_segs);
+ int ll_back_merge_fn(struct request *req, struct bio *bio,
+ unsigned int nr_segs);
+ bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 31e437d94869d..a98fa0ccae601 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -74,20 +74,13 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+
+ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
+ struct btrfs_bio *orig_bbio,
+- u64 map_length, bool use_append)
++ u64 map_length)
+ {
+ struct btrfs_bio *bbio;
+ struct bio *bio;
+
+- if (use_append) {
+- unsigned int nr_segs;
+-
+- bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs,
+- &btrfs_clone_bioset, map_length);
+- } else {
+- bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT,
+- GFP_NOFS, &btrfs_clone_bioset);
+- }
++ bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
++ &btrfs_clone_bioset);
+ bbio = btrfs_bio(bio);
+ btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
+ bbio->inode = orig_bbio->inode;
+@@ -648,6 +641,19 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
+ return true;
+ }
+
++static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
++{
++ unsigned int nr_segs;
++ int sector_offset;
++
++ map_length = min(map_length, bbio->fs_info->max_zone_append_size);
++ sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
++ &nr_segs, map_length);
++ if (sector_offset)
++ return sector_offset << SECTOR_SHIFT;
++ return map_length;
++}
++
+ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ {
+ struct btrfs_inode *inode = bbio->inode;
+@@ -674,10 +680,10 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+
+ map_length = min(map_length, length);
+ if (use_append)
+- map_length = min(map_length, fs_info->max_zone_append_size);
++ map_length = btrfs_append_map_length(bbio, map_length);
+
+ if (map_length < length) {
+- bbio = btrfs_split_bio(fs_info, bbio, map_length, use_append);
++ bbio = btrfs_split_bio(fs_info, bbio, map_length);
+ bio = &bbio->bio;
+ }
+
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index a46e2047bea4d..faceadb040f9a 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -324,8 +324,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+ void bio_trim(struct bio *bio, sector_t offset, sector_t size);
+ extern struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs);
+-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+- unsigned *segs, struct bio_set *bs, unsigned max_bytes);
++int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
++ unsigned *segs, unsigned max_bytes);
+
+ /**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+--
+2.43.0
+
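The rework boils down to a three-way contract: the low-level check returns a negative errno, zero, or a positive split offset, and a single submission helper acts on that result. A small userspace C model of that contract follows; struct toy_bio, toy_split_at() and toy_submit_split() are hypothetical stand-ins for struct bio, bio_split_rw_at() and bio_submit_split(), not the kernel implementation.

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for struct bio: just a sector count and a status. */
struct toy_bio {
	unsigned int sectors;
	int status;		/* 0 = ok, negative errno = failed */
};

/* Like bio_split_rw_at(): < 0 error, 0 no split, > 0 split offset in sectors. */
static int toy_split_at(const struct toy_bio *bio, unsigned int max_sectors)
{
	if (bio->sectors == 0)
		return -EINVAL;
	if (bio->sectors <= max_sectors)
		return 0;
	return (int)max_sectors;
}

/*
 * Like bio_submit_split(): turn the three-way result into either a failed
 * bio, an untouched bio, or a front part plus a requeued remainder.
 */
static struct toy_bio *toy_submit_split(struct toy_bio *bio, int split_sectors)
{
	if (split_sectors < 0) {
		bio->status = split_sectors;	/* end the bio with an error */
		return NULL;
	}
	if (split_sectors > 0) {
		/* single static slot keeps the sketch allocation-free */
		static struct toy_bio front;

		front.sectors = (unsigned int)split_sectors;
		front.status = 0;
		bio->sectors -= front.sectors;	/* remainder is "resubmitted" */
		return &front;
	}
	return bio;				/* no split needed */
}

int main(void)
{
	struct toy_bio big = { .sectors = 300 };
	struct toy_bio *out = toy_submit_split(&big, toy_split_at(&big, 256));

	if (out)
		printf("issue %u sectors now, %u sectors remain\n",
		       out->sectors, out == &big ? 0 : big.sectors);
	return 0;
}
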
--- /dev/null
+From 2dae2a793d00c63171170d7449b36af0c21aac37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 16:44:10 +0530
+Subject: drm/xe/guc/ct: Flush g2h worker in case of g2h response timeout
+
+From: Badal Nilawar <badal.nilawar@intel.com>
+
+[ Upstream commit 22ef43c78647dd37b0dafe2182b8650b99dbbe59 ]
+
+If the g2h worker doesn't get an opportunity to run within the
+specified timeout delay, then flush the g2h worker explicitly.
+
+v2:
+ - Describe change in the comment and add TODO (Matt B/John H)
+ - Add xe_gt_warn on fence done after G2H flush (John H)
+v3:
+ - Updated the comment with root cause
+ - Clean up xe_gt_warn message (John H)
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1620
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2902
+Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: John Harrison <John.C.Harrison@Intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Acked-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241017111410.2553784-2-badal.nilawar@intel.com
+(cherry picked from commit e5152723380404acb8175e0777b1cea57f319a01)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Stable-dep-of: 55e8a3f37e54 ("drm/xe: Move LNL scheduling WA to xe_device.h")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_guc_ct.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index cd9918e3896c0..ab24053f8766f 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -888,6 +888,24 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+
+ ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
++ /*
++ * Occasionally it is seen that the G2H worker starts running after a delay of more than
++ * a second even after being queued and activated by the Linux workqueue subsystem. This
++ * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
++ * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
++ * and this is beyond xe kmd.
++ *
++ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
++ */
++ if (!ret) {
++ flush_work(&ct->g2h_worker);
++ if (g2h_fence.done) {
++ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
++ g2h_fence.seqno, action[0]);
++ ret = 1;
++ }
++ }
++
+ /*
+ * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+ * the stack, since we have no clue if it will fire after the timeout before we can erase
+--
+2.43.0
+
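The workaround amounts to "wait with a timeout, then flush and recheck": if the timed wait expires, run the worker synchronously and look at the completion flag once more before treating it as a real timeout. A compact userspace C model of that flow is below; run_worker() and wait_for_fence() stand in for flush_work() and wait_event_timeout() and simply simulate a late worker, they are not the driver code.

#include <stdbool.h>
#include <stdio.h>

struct toy_fence {
	bool done;
};

/* Stand-in for the queued G2H worker that may be scheduled late. */
static void run_worker(struct toy_fence *fence)
{
	fence->done = true;	/* processing the response completes the fence */
}

/*
 * Stand-in for wait_event_timeout(): returns non-zero if the condition
 * became true in time, 0 on timeout.  Here we just simulate a timeout.
 */
static int wait_for_fence(struct toy_fence *fence)
{
	return fence->done ? 1 : 0;
}

int main(void)
{
	struct toy_fence fence = { .done = false };
	int ret = wait_for_fence(&fence);

	if (!ret) {
		/* Timed out: flush the worker explicitly and recheck. */
		run_worker(&fence);
		if (fence.done) {
			printf("fence completed only after explicit flush\n");
			ret = 1;	/* treat it as success, not a timeout */
		}
	}
	printf("ret = %d\n", ret);
	return 0;
}
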
--- /dev/null
+From 65b78f80958e08f03219ad99073beef6c81db63b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2024 13:01:17 +0100
+Subject: drm/xe/guc/tlb: Flush g2h worker in case of tlb timeout
+
+From: Nirmoy Das <nirmoy.das@intel.com>
+
+[ Upstream commit 1491efb39acee3848b61fcb3e5cc4be8de304352 ]
+
+Flush the g2h worker explicitly if a TLB timeout happens, which is
+observed on LNL and points to the recent scheduling issue with
+E-cores on LNL.
+
+This is similar to the recent fix:
+commit e51527233804 ("drm/xe/guc/ct: Flush g2h worker in case of g2h
+response timeout") and should be removed once there is E core
+scheduling fix.
+
+v2: Add platform check(Himal)
+v3: Remove gfx platform check as the issue related to cpu
+ platform(John)
+ Use the common WA macro(John) and print when the flush
+ resolves timeout(Matt B)
+v4: Remove the resolves log and do the flush before taking
+ pending_lock(Matt A)
+
+Cc: Badal Nilawar <badal.nilawar@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: John Harrison <John.C.Harrison@Intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: stable@vger.kernel.org # v6.11+
+Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2687
+Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241029120117.449694-3-nirmoy.das@intel.com
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit e1f6fa55664a0eeb0a641f497e1adfcf6672e995)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 82795133e129e..836c15253ce7e 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -71,6 +71,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_gt_tlb_invalidation_fence *fence, *next;
+
++ LNL_FLUSH_WORK(>->uc.guc.ct.g2h_worker);
++
+ spin_lock_irq(>->tlb_invalidation.pending_lock);
+ list_for_each_entry_safe(fence, next,
+ >->tlb_invalidation.pending_fences, link) {
+--
+2.43.0
+
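The detail worth noting in this patch is the ordering: the flush is issued before the pending-fence lock is taken, so any completions the worker delivers are already visible when the pending list is walked. A schematic userspace C fragment of that ordering follows; the mutex, flag and function names are illustrative only, not the driver's data structures.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static bool fence_signalled;

/* Stand-in for flushing the G2H worker: it may signal pending fences. */
static void flush_g2h_worker(void)
{
	fence_signalled = true;
}

static void timeout_handler(void)
{
	/* Flush first, before taking the lock held while walking the list. */
	flush_g2h_worker();

	pthread_mutex_lock(&pending_lock);
	if (!fence_signalled)
		printf("fence really timed out\n");
	else
		printf("fence was completed by the flushed worker\n");
	pthread_mutex_unlock(&pending_lock);
}

int main(void)
{
	timeout_handler();
	return 0;
}
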
--- /dev/null
+From 9c0cbcb408ba375c88fc5bb36cd3c45408d572a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2024 13:01:15 +0100
+Subject: drm/xe: Move LNL scheduling WA to xe_device.h
+
+From: Nirmoy Das <nirmoy.das@intel.com>
+
+[ Upstream commit 55e8a3f37e54eb1c7b914d6d5565a37282ec1978 ]
+
+Move LNL scheduling WA to xe_device.h so this can be used in other
+places without needing to keep the same comment about removal of this WA
+in the future. The WA, which flushes work or workqueues, is now wrapped
+in macros and can be reused wherever needed.
+
+Cc: Badal Nilawar <badal.nilawar@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+cc: stable@vger.kernel.org # v6.11+
+Suggested-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241029120117.449694-1-nirmoy.das@intel.com
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit cbe006a6492c01a0058912ae15d473f4c149896c)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_device.h | 14 ++++++++++++++
+ drivers/gpu/drm/xe/xe_guc_ct.c | 11 +----------
+ 2 files changed, 15 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
+index 533ccfb2567a2..41d6ca3cce96a 100644
+--- a/drivers/gpu/drm/xe/xe_device.h
++++ b/drivers/gpu/drm/xe/xe_device.h
+@@ -174,4 +174,18 @@ void xe_device_declare_wedged(struct xe_device *xe);
+ struct xe_file *xe_file_get(struct xe_file *xef);
+ void xe_file_put(struct xe_file *xef);
+
++/*
++ * Occasionally it is seen that the G2H worker starts running after a delay of more than
++ * a second even after being queued and activated by the Linux workqueue subsystem. This
++ * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
++ * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS
++ * and this is beyond xe kmd.
++ *
++ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
++ */
++#define LNL_FLUSH_WORKQUEUE(wq__) \
++ flush_workqueue(wq__)
++#define LNL_FLUSH_WORK(wrk__) \
++ flush_work(wrk__)
++
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index ab24053f8766f..12e1fe6a8da28 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -888,17 +888,8 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+
+ ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+- /*
+- * Occasionally it is seen that the G2H worker starts running after a delay of more than
+- * a second even after being queued and activated by the Linux workqueue subsystem. This
+- * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
+- * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
+- * and this is beyond xe kmd.
+- *
+- * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+- */
+ if (!ret) {
+- flush_work(&ct->g2h_worker);
++ LNL_FLUSH_WORK(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+--
+2.43.0
+
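The macros themselves are deliberately trivial wrappers; their value is that the explanatory comment about the LNL scheduling issue now lives in one header instead of at every call site. A userspace sketch of the same idea is below; the printf bodies model flush_work()/flush_workqueue(), and since the arguments are only stringified, the call sites shown are purely illustrative.

#include <stdio.h>

/*
 * Illustrative equivalents of the LNL_FLUSH_* wrappers: trivial macros
 * whose only job is to give every call site one central place for the
 * "remove once the LNL scheduling issue is fixed" comment.  The real
 * macros simply expand to flush_work()/flush_workqueue().
 */
#define LNL_FLUSH_WORK(wrk)	printf("flush_work(%s)\n", #wrk)
#define LNL_FLUSH_WORKQUEUE(wq)	printf("flush_workqueue(%s)\n", #wq)

int main(void)
{
	/* Arguments are only stringified in this sketch, so any name works. */
	LNL_FLUSH_WORK(&ct->g2h_worker);
	LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
	return 0;
}
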
--- /dev/null
+From eb16327beafbed6cb88b8d064eb3df24c4f66d23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2024 13:01:16 +0100
+Subject: drm/xe/ufence: Flush xe ordered_wq in case of ufence timeout
+
+From: Nirmoy Das <nirmoy.das@intel.com>
+
+[ Upstream commit 7d1e2580ed166f36949b468373b468d188880cd3 ]
+
+Flush the xe ordered_wq in case of a ufence timeout, which is observed
+on LNL and points to the recent scheduling issue with E-cores.
+
+This is similar to the recent fix:
+commit e51527233804 ("drm/xe/guc/ct: Flush g2h worker in case of g2h
+response timeout") and should be removed once there is a E-core
+scheduling fix for LNL.
+
+v2: Add platform check(Himal)
+ s/__flush_workqueue/flush_workqueue(Jani)
+v3: Remove gfx platform check as the issue related to cpu
+ platform(John)
+v4: Use the Common macro(John) and print when the flush resolves
+ timeout(Matt B)
+
+Cc: Badal Nilawar <badal.nilawar@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: John Harrison <John.C.Harrison@Intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: stable@vger.kernel.org # v6.11+
+Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2754
+Suggested-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241029120117.449694-2-nirmoy.das@intel.com
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit 38c4c8722bd74452280951edc44c23de47612001)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_wait_user_fence.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
+index 92f65b9c52801..2bff43c5962e0 100644
+--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
++++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
+@@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
+ }
+
+ if (!timeout) {
++ LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
++ err = do_compare(addr, args->value, args->mask,
++ args->op);
++ if (err <= 0) {
++ drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n");
++ break;
++ }
+ err = -ETIME;
+ break;
+ }
+--
+2.43.0
+
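The shape of the fix is: on timeout, flush the ordered workqueue, re-run the user-fence comparison, and only report -ETIME if the condition is still unmet. A minimal userspace C sketch of that timeout path follows; flush_ordered_wq(), do_compare() and the single fence value are simplified stand-ins for the driver's workqueue and compare helper.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned long long fence_value;	/* memory the ufence compares against */

/* Stand-in for flushing xe->ordered_wq: pending signalling work runs now. */
static void flush_ordered_wq(void)
{
	fence_value = 1;
}

/* Stand-in for do_compare(): 0 when the wait condition is satisfied. */
static int do_compare(unsigned long long wanted)
{
	return fence_value == wanted ? 0 : 1;
}

static int wait_ufence(unsigned long long wanted, bool timed_out)
{
	if (!timed_out)
		return 0;

	/* Timeout path from the patch: flush, recheck, only then -ETIME. */
	flush_ordered_wq();
	if (do_compare(wanted) == 0) {
		printf("flush resolved the ufence timeout\n");
		return 0;
	}
	return -ETIME;
}

int main(void)
{
	printf("ret = %d\n", wait_ufence(1, true));	/* resolved by flush */
	printf("ret = %d\n", wait_ufence(2, true));	/* genuine timeout */
	return 0;
}
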
--- /dev/null
+From cbec87c78133a35a8a0d3fb4edfe3b873691b159 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jul 2024 21:26:54 +0530
+Subject: firmware: qcom: scm: Refactor code to support multiple dload mode
+
+From: Mukesh Ojha <quic_mojha@quicinc.com>
+
+[ Upstream commit c802b0a2ed0f67fcec8cc0cac685c8fd0dd0aa6f ]
+
+Currently on Qualcomm SoCs, download_mode is enabled if
+CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is selected or if a
+boolean value is passed on the command line.
+
+Refactor the code such that it supports multiple download
+modes and drop the CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT config;
+instead, provide an interface to set the download mode from a
+module parameter while staying backward compatible at the
+same time.
+
+Signed-off-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Link: https://lore.kernel.org/r/20240715155655.1811178-1-quic_mojha@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Stable-dep-of: d67907154808 ("firmware: qcom: scm: suppress download mode error")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/qcom/Kconfig | 11 ------
+ drivers/firmware/qcom/qcom_scm.c | 60 +++++++++++++++++++++++++++-----
+ 2 files changed, 52 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
+index 73a1a41bf92dd..b477d54b495a6 100644
+--- a/drivers/firmware/qcom/Kconfig
++++ b/drivers/firmware/qcom/Kconfig
+@@ -41,17 +41,6 @@ config QCOM_TZMEM_MODE_SHMBRIDGE
+
+ endchoice
+
+-config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+- bool "Qualcomm download mode enabled by default"
+- depends on QCOM_SCM
+- help
+- A device with "download mode" enabled will upon an unexpected
+- warm-restart enter a special debug mode that allows the user to
+- "download" memory content over USB for offline postmortem analysis.
+- The feature can be enabled/disabled on the kernel command line.
+-
+- Say Y here to enable "download mode" by default.
+-
+ config QCOM_QSEECOM
+ bool "Qualcomm QSEECOM interface driver"
+ depends on QCOM_SCM=y
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 6436bd09587a5..26b0eb7d147db 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -18,6 +18,7 @@
+ #include <linux/init.h>
+ #include <linux/interconnect.h>
+ #include <linux/interrupt.h>
++#include <linux/kstrtox.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+@@ -32,8 +33,7 @@
+ #include "qcom_scm.h"
+ #include "qcom_tzmem.h"
+
+-static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+-module_param(download_mode, bool, 0);
++static u32 download_mode;
+
+ struct qcom_scm {
+ struct device *dev;
+@@ -135,6 +135,11 @@ static const char * const qcom_scm_convention_names[] = {
+ [SMC_CONVENTION_LEGACY] = "smc legacy",
+ };
+
++static const char * const download_mode_name[] = {
++ [QCOM_DLOAD_NODUMP] = "off",
++ [QCOM_DLOAD_FULLDUMP] = "full",
++};
++
+ static struct qcom_scm *__scm;
+
+ static int qcom_scm_clk_enable(void)
+@@ -527,17 +532,16 @@ static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val
+ return qcom_scm_io_writel(addr, new);
+ }
+
+-static void qcom_scm_set_download_mode(bool enable)
++static void qcom_scm_set_download_mode(u32 dload_mode)
+ {
+- u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
+ int ret = 0;
+
+ if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
+- FIELD_PREP(QCOM_DLOAD_MASK, val));
++ FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
+ } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
+- ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
++ ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+@@ -1897,6 +1901,46 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
++static int get_download_mode(char *buffer, const struct kernel_param *kp)
++{
++ if (download_mode >= ARRAY_SIZE(download_mode_name))
++ return sysfs_emit(buffer, "unknown mode\n");
++
++ return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
++}
++
++static int set_download_mode(const char *val, const struct kernel_param *kp)
++{
++ bool tmp;
++ int ret;
++
++ ret = sysfs_match_string(download_mode_name, val);
++ if (ret < 0) {
++ ret = kstrtobool(val, &tmp);
++ if (ret < 0) {
++ pr_err("qcom_scm: err: %d\n", ret);
++ return ret;
++ }
++
++ ret = tmp ? 1 : 0;
++ }
++
++ download_mode = ret;
++ if (__scm)
++ qcom_scm_set_download_mode(download_mode);
++
++ return 0;
++}
++
++static const struct kernel_param_ops download_mode_param_ops = {
++ .get = get_download_mode,
++ .set = set_download_mode,
++};
++
++module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
++MODULE_PARM_DESC(download_mode,
++ "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode");
++
+ static int qcom_scm_probe(struct platform_device *pdev)
+ {
+ struct qcom_tzmem_pool_config pool_config;
+@@ -1961,7 +2005,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ __get_convention();
+
+ /*
+- * If requested enable "download mode", from this point on warmboot
++ * If "download mode" is requested, from this point on warmboot
+ * will cause the boot stages to enter download mode, unless
+ * disabled below by a clean shutdown/reboot.
+ */
+@@ -2012,7 +2056,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ static void qcom_scm_shutdown(struct platform_device *pdev)
+ {
+ /* Clean shutdown, disable download mode to allow normal restart */
+- qcom_scm_set_download_mode(false);
++ qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
+ }
+
+ static const struct of_device_id qcom_scm_dt_match[] = {
+--
+2.43.0
+
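The refactor replaces the boolean module parameter with a name-based one that stays backward compatible: the value is first matched against a table of mode names and, failing that, parsed as a boolean. A self-contained userspace C sketch of that parsing logic is below; parse_bool() roughly approximates kstrtobool(), and only the "off"/"full" modes from this patch are modelled.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Modes as in the patch; only "off" and "full" get named entries. */
enum { DLOAD_NODUMP = 0, DLOAD_FULLDUMP = 1 };

static const char *const mode_names[] = {
	[DLOAD_NODUMP]   = "off",
	[DLOAD_FULLDUMP] = "full",
};

static unsigned int download_mode;

/* Loose equivalent of kstrtobool(): accepts 0/1/y/n/on/off style input. */
static int parse_bool(const char *val, bool *res)
{
	if (!strcmp(val, "1") || !strcmp(val, "y") || !strcmp(val, "Y") ||
	    !strcmp(val, "on")) {
		*res = true;
		return 0;
	}
	if (!strcmp(val, "0") || !strcmp(val, "n") || !strcmp(val, "N") ||
	    !strcmp(val, "off")) {
		*res = false;
		return 0;
	}
	return -1;
}

/* Mirrors set_download_mode(): try the name table first, then a boolean. */
static int set_download_mode(const char *val)
{
	bool tmp;
	size_t i;

	for (i = 0; i < sizeof(mode_names) / sizeof(mode_names[0]); i++) {
		if (!strcmp(val, mode_names[i])) {
			download_mode = (unsigned int)i;
			return 0;
		}
	}
	if (parse_bool(val, &tmp))
		return -1;		/* unrecognised value */
	download_mode = tmp ? DLOAD_FULLDUMP : DLOAD_NODUMP;
	return 0;
}

int main(void)
{
	const char *inputs[] = { "full", "off", "1", "N", "banana" };
	size_t i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		int ret = set_download_mode(inputs[i]);

		printf("%-6s -> ret=%d mode=%s\n", inputs[i], ret,
		       ret ? "(unchanged)" : mode_names[download_mode]);
	}
	return 0;
}
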
--- /dev/null
+From 1b392af736b5605c2454f8ef0cb08c7509f31fdc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Oct 2024 12:01:21 +0200
+Subject: firmware: qcom: scm: suppress download mode error
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit d67907154808745b0fae5874edc7b0f78d33991c ]
+
+Stop spamming the logs with errors about a missing mechanism for setting
+the so-called download (or dump) mode for users that have not requested
+that feature to be enabled in the first place.
+
+This avoids the following error being logged on boot as well as on
+shutdown when the feature is not available and download mode has not
+been enabled on the kernel command line:
+
+ qcom_scm firmware:scm: No available mechanism for setting download mode
+
+Fixes: 79cb2cb8d89b ("firmware: qcom: scm: Disable SDI and write no dump to dump mode")
+Fixes: 781d32d1c970 ("firmware: qcom_scm: Clear download bit during reboot")
+Cc: Mukesh Ojha <quic_mojha@quicinc.com>
+Cc: stable@vger.kernel.org # 6.4
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Link: https://lore.kernel.org/r/20241002100122.18809-2-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/qcom/qcom_scm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 26b0eb7d147db..e10500cd4658f 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -542,7 +542,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
+ } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
+- } else {
++ } else if (dload_mode) {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+--
+2.43.0
+
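The one-line change ties the error message to an actual request: with download mode left at its default of 0 and no mechanism available, the function now stays silent. Below is a tiny userspace C sketch of the resulting control flow; the flags and printf calls merely model the register write, the SCM call and dev_err().

#include <stdio.h>

static int dload_mode_addr;		/* 0: no register available */
static int set_dload_call_available;	/* 0: no SCM call available */

/* After the fix: the "no mechanism" error is tied to an actual request. */
static void set_download_mode(unsigned int dload_mode)
{
	if (dload_mode_addr)
		printf("write mode %u to register\n", dload_mode);
	else if (set_dload_call_available)
		printf("issue SCM call, enable=%d\n", !!dload_mode);
	else if (dload_mode)		/* was an unconditional else before */
		printf("error: no available mechanism for setting download mode\n");
	/* dload_mode == 0 and no mechanism: stay silent */
}

int main(void)
{
	set_download_mode(0);	/* boot/shutdown default: no log spam */
	set_download_mode(1);	/* user asked for dumps: error is justified */
	return 0;
}
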
ocfs2-remove-entry-once-instead-of-null-ptr-dereference-in-ocfs2_xa_remove.patch
ucounts-fix-counter-leak-in-inc_rlimit_get_ucounts.patch
selftests-hugetlb_dio-check-for-initial-conditions-to-skip-in-the-start.patch
+firmware-qcom-scm-refactor-code-to-support-multiple-.patch
+firmware-qcom-scm-suppress-download-mode-error.patch
+block-rework-bio-splitting.patch
+block-fix-queue-limits-checks-in-blk_rq_map_user_bve.patch
+drm-xe-guc-ct-flush-g2h-worker-in-case-of-g2h-respon.patch
+drm-xe-move-lnl-scheduling-wa-to-xe_device.h.patch
+drm-xe-ufence-flush-xe-ordered_wq-in-case-of-ufence-.patch
+drm-xe-guc-tlb-flush-g2h-worker-in-case-of-tlb-timeo.patch