From: Greg Kroah-Hartman
Date: Wed, 6 Nov 2024 10:58:43 +0000 (+0100)
Subject: 6.11-stable patches
X-Git-Tag: v4.19.323~12
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2177da621f7daf7ecf5fe1eb2c611a9b7648f13e;p=thirdparty%2Fkernel%2Fstable-queue.git

6.11-stable patches

added patches:
      block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
      drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
      fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
---

diff --git a/queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch b/queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
new file mode 100644
index 00000000000..c3db47adbb7
--- /dev/null
+++ b/queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
@@ -0,0 +1,104 @@
+From be0e822bb3f5259c7f9424ba97e8175211288813 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig
+Date: Mon, 28 Oct 2024 10:07:48 +0100
+Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
+
+From: Christoph Hellwig
+
+commit be0e822bb3f5259c7f9424ba97e8175211288813 upstream.
+
+blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
+and the last fix to it enabled valid NVMe I/O to pass, but also allowed
+invalid ones for drivers that set a max_segment_size or seg_boundary
+limit.
+
+Fix it once and for all by using the bio_split_rw_at helper from the I/O
+path, which indicates if and where a bio would have to be split to
+adhere to the queue limits; if it returns a positive value, turn that
+into -EREMOTEIO to retry using the copy path.
+
+Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
+Signed-off-by: Christoph Hellwig
+Reviewed-by: John Garry
+Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ block/blk-map.c |   56 +++++++++++++++++---------------------------------------
+ 1 file changed, 17 insertions(+), 39 deletions(-)
+
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
+ /* Prepare bio for passthrough IO given ITER_BVEC iter */
+ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+ {
+-	struct request_queue *q = rq->q;
+-	size_t nr_iter = iov_iter_count(iter);
+-	size_t nr_segs = iter->nr_segs;
+-	struct bio_vec *bvecs, *bvprvp = NULL;
+-	const struct queue_limits *lim = &q->limits;
+-	unsigned int nsegs = 0, bytes = 0;
++	const struct queue_limits *lim = &rq->q->limits;
++	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
++	unsigned int nsegs;
+ 	struct bio *bio;
+-	size_t i;
++	int ret;
+ 
+-	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+-		return -EINVAL;
+-	if (nr_segs > queue_max_segments(q))
++	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
+ 		return -EINVAL;
+ 
+-	/* no iovecs to alloc, as we already have a BVEC iterator */
++	/* reuse the bvecs from the iterator instead of allocating new ones */
+ 	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+-	if (bio == NULL)
++	if (!bio)
+ 		return -ENOMEM;
+-
+ 	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+-	blk_rq_bio_prep(rq, bio, nr_segs);
+-
+-	/* loop to perform a bunch of sanity checks */
+-	bvecs = (struct bio_vec *)iter->bvec;
+-	for (i = 0; i < nr_segs; i++) {
+-		struct bio_vec *bv = &bvecs[i];
+ 
+-		/*
+-		 * If the queue doesn't support SG gaps and adding this
+-		 * offset would create a gap, fallback to copy.
+-		 */
+-		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+-			blk_mq_map_bio_put(bio);
+-			return -EREMOTEIO;
+-		}
+-		/* check full condition */
+-		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+-			goto put_bio;
+-		if (bytes + bv->bv_len > nr_iter)
+-			break;
+-
+-		nsegs++;
+-		bytes += bv->bv_len;
+-		bvprvp = bv;
++	/* check that the data layout matches the hardware restrictions */
++	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
++	if (ret) {
++		/* if we would have to split the bio, copy instead */
++		if (ret > 0)
++			ret = -EREMOTEIO;
++		blk_mq_map_bio_put(bio);
++		return ret;
+ 	}
++
++	blk_rq_bio_prep(rq, bio, nsegs);
+ 	return 0;
+-put_bio:
+-	blk_mq_map_bio_put(bio);
+-	return -EINVAL;
+ }
+ 
+ /**
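
The behavioural core of the change above is the return-value contract it relies on:
a negative result from bio_split_rw_at() is a hard error, zero means the bvec layout
already fits the queue limits and can be mapped zero-copy, and a positive result (the
offset at which the bio would have to be split) is converted to -EREMOTEIO so that the
caller (blk_rq_map_user_iov() in the same file) can fall back to copying the data.
The stand-alone sketch below only models that decision; map_or_copy() and split_offset
are invented names for illustration, not kernel symbols.

#include <errno.h>
#include <stdio.h>

/*
 * Toy model of the rewritten blk_rq_map_user_bvec() error handling.
 * split_offset stands in for bio_split_rw_at()'s result:
 *   < 0  -> hard error, propagated unchanged
 *   == 0 -> the layout fits the queue limits, map it zero-copy
 *   > 0  -> a split would be needed, so report -EREMOTEIO and let the
 *           caller retry through the copy path instead
 */
int map_or_copy(long split_offset)
{
	if (split_offset < 0)
		return (int)split_offset;	/* e.g. -EINVAL from the size check */
	if (split_offset > 0)
		return -EREMOTEIO;		/* caller bounces to the copy path */
	return 0;				/* zero-copy mapping succeeded */
}

int main(void)
{
	printf("fits:  %d\n", map_or_copy(0));		/* 0 */
	printf("split: %d\n", map_or_copy(4096));	/* -EREMOTEIO, i.e. copy instead */
	printf("error: %d\n", map_or_copy(-EINVAL));	/* -EINVAL */
	return 0;
}
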
diff --git a/queue-6.11/drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch b/queue-6.11/drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
new file mode 100644
index 00000000000..94ce685acf8
--- /dev/null
+++ b/queue-6.11/drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
@@ -0,0 +1,51 @@
+From 7c210ca5a2d72868e5a052fc533d5dcb7e070f89 Mon Sep 17 00:00:00 2001
+From: Alex Deucher
+Date: Fri, 18 Oct 2024 12:35:51 -0400
+Subject: drm/amdgpu: handle default profile on on devices without fullscreen 3D
+
+From: Alex Deucher
+
+commit 7c210ca5a2d72868e5a052fc533d5dcb7e070f89 upstream.
+
+Some devices do not support fullscreen 3D.
+
+v2: Make the check generic.
+
+Fixes: ec1aab7816b0 ("drm/amdgpu/swsmu: default to fullscreen 3D profile for dGPUs")
+Reviewed-by: Lijo Lazar
+Signed-off-by: Alex Deucher
+Cc: Kenneth Feng
+Cc: Lijo Lazar
+(cherry picked from commit 1cdd67510e54e3832f14a885dbf5858584558650)
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(stru
+ 	}
+ }
+ 
++static bool smu_is_workload_profile_available(struct smu_context *smu,
++					      u32 profile)
++{
++	if (profile >= PP_SMC_POWER_PROFILE_COUNT)
++		return false;
++	return smu->workload_map && smu->workload_map[profile].valid_mapping;
++}
++
+ static int smu_sw_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1265,7 +1273,8 @@ static int smu_sw_init(void *handle)
+ 	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ 	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ 
+-	if (smu->is_apu)
++	if (smu->is_apu ||
++	    !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ 		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ 	else
+ 		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
diff --git a/queue-6.11/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch b/queue-6.11/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
new file mode 100644
index 00000000000..fd12392eb06
--- /dev/null
+++ b/queue-6.11/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
@@ -0,0 +1,71 @@
+From 090f612756a9720ec18b0b130e28be49839d7cb5 Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov
+Date: Thu, 5 Sep 2024 15:03:48 +0300
+Subject: fs/ntfs3: Sequential field availability check in mi_enum_attr()
+
+From: Konstantin Komarov
+
+commit 090f612756a9720ec18b0b130e28be49839d7cb5 upstream.
+
+The code is slightly reformatted to consistently check field availability
+without duplication.
+
+Fixes: 556bdf27c2dd ("ntfs3: Add bounds checking to mi_enum_attr()")
+Signed-off-by: Konstantin Komarov
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/ntfs3/record.c |   15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -237,6 +237,7 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 	}
+ 
+ 	/* Can we use the first field (attr->type). */
++	/* NOTE: this code also checks attr->size availability. */
+ 	if (off + 8 > used) {
+ 		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
+ 		return NULL;
+@@ -257,10 +258,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 		return NULL;
+ 
+ 	asize = le32_to_cpu(attr->size);
+-	if (asize < SIZEOF_RESIDENT) {
+-		/* Impossible 'cause we should not return such attribute. */
+-		return NULL;
+-	}
+ 
+ 	/* Check overflow and boundary. */
+ 	if (off + asize < off || off + asize > used)
+@@ -290,6 +287,10 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 	if (attr->non_res != 1)
+ 		return NULL;
+ 
++	/* Can we use memory including attr->nres.valid_size? */
++	if (asize < SIZEOF_NONRESIDENT)
++		return NULL;
++
+ 	t16 = le16_to_cpu(attr->nres.run_off);
+ 	if (t16 > asize)
+ 		return NULL;
+@@ -316,7 +317,8 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 
+ 	if (!attr->nres.svcn && is_attr_ext(attr)) {
+ 		/* First segment of sparse/compressed attribute */
+-		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
++		/* Can we use memory including attr->nres.total_size? */
++		if (asize < SIZEOF_NONRESIDENT_EX)
+ 			return NULL;
+ 
+ 		tot_size = le64_to_cpu(attr->nres.total_size);
+@@ -326,9 +328,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 		if (tot_size > alloc_size)
+ 			return NULL;
+ 	} else {
+-		if (asize + 8 < SIZEOF_NONRESIDENT)
+-			return NULL;
+-
+ 		if (attr->nres.c_unit)
+ 			return NULL;
+ 
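
The pattern running through the ntfs3 patch above is a sequential availability check:
no field of the attribute is dereferenced until the declared attribute size (asize) has
been shown to cover it, with the shallow fields validated first and the non-resident
fields only when the record actually claims to contain them. The stand-alone sketch
below illustrates that pattern; the toy_* structures and their layout are invented for
the example and are not the ntfs3 on-disk format.

#include <stddef.h>
#include <stdint.h>

/* Invented record layout, NOT the ntfs3 structures. */
struct toy_hdr  { uint32_t type; uint32_t size; uint8_t non_res; };
struct toy_res  { struct toy_hdr hdr; uint16_t data_off; };
struct toy_nres { struct toy_res res; uint64_t valid_size; };

/*
 * Sequential field availability check: every access is gated by a
 * bounds check that covers exactly what has been read so far.
 */
const struct toy_hdr *toy_enum(const uint8_t *buf, size_t used, size_t off)
{
	const struct toy_hdr *hdr;
	uint32_t asize;

	/* 1. Is the fixed header (type, size, non_res) inside the record? */
	if (off + sizeof(*hdr) > used)
		return NULL;
	hdr = (const struct toy_hdr *)(buf + off);

	/* 2. The declared size must not overflow or leave the used area. */
	asize = hdr->size;
	if (off + asize < off || off + asize > used)
		return NULL;

	/* 3. It must cover the resident fields before data_off is trusted. */
	if (asize < sizeof(struct toy_res))
		return NULL;

	/* 4. Only non-resident records need the deeper fields as well. */
	if (hdr->non_res && asize < sizeof(struct toy_nres))
		return NULL;

	return hdr;
}

Each check only assumes what the previous one has already established, which is what
allows the duplicated asize comparisons in the old code to be dropped.
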
diff --git a/queue-6.11/series b/queue-6.11/series
index 524cc8ab1aa..8d052f95570 100644
--- a/queue-6.11/series
+++ b/queue-6.11/series
@@ -241,3 +241,6 @@ drm-xe-define-stateless_compression_ctrl-as-mcr-register.patch
 drm-xe-write-all-slices-if-its-mcr-register.patch
 drm-amdgpu-swsmu-fix-ordering-for-setting-workload_mask.patch
 drm-amdgpu-swsmu-default-to-fullscreen-3d-profile-for-dgpus.patch
+fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
+drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
+block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch