From: Greg Kroah-Hartman
Date: Mon, 7 Mar 2016 23:15:55 +0000 (-0800)
Subject: 4.4-stable patches
X-Git-Tag: v3.10.100~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=233497364e9ab8ff7c408d0d6a5ae969c09478a3;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
    block-check-virt-boundary-in-bio_will_gap.patch
    block-get-the-1st-and-last-bvec-via-helpers.patch
    drm-amdgpu-use-drm_calloc_large-for-vm-page_tables-array.patch
---
diff --git a/queue-4.4/block-check-virt-boundary-in-bio_will_gap.patch b/queue-4.4/block-check-virt-boundary-in-bio_will_gap.patch
new file mode 100644
index 00000000000..860bfe78bf7
--- /dev/null
+++ b/queue-4.4/block-check-virt-boundary-in-bio_will_gap.patch
@@ -0,0 +1,64 @@
+From e0af29171aa8912e1ca95023b75ef336cd70d661 Mon Sep 17 00:00:00 2001
+From: Ming Lei
+Date: Fri, 26 Feb 2016 23:40:51 +0800
+Subject: block: check virt boundary in bio_will_gap()
+
+From: Ming Lei
+
+commit e0af29171aa8912e1ca95023b75ef336cd70d661 upstream.
+
+In the following patch, the way for figuring out
+the last bvec will be changed with a bit cost introduced,
+so return immediately if the queue doesn't have virt
+boundary limit. Actually most of devices have not
+this limit.
+
+Reviewed-by: Sagi Grimberg
+Reviewed-by: Christoph Hellwig
+Signed-off-by: Ming Lei
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/blkdev.h |   16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1367,6 +1367,13 @@ static inline void put_dev_sector(Sector
+ 	page_cache_release(p.v);
+ }
+ 
++static inline bool __bvec_gap_to_prev(struct request_queue *q,
++				struct bio_vec *bprv, unsigned int offset)
++{
++	return offset ||
++		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
++}
++
+ /*
+  * Check if adding a bio_vec after bprv with offset would create a gap in
+  * the SG list. Most drivers don't care about this, but some do.
+@@ -1376,18 +1383,17 @@ static inline bool bvec_gap_to_prev(stru
+ {
+ 	if (!queue_virt_boundary(q))
+ 		return false;
+-	return offset ||
+-		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
++	return __bvec_gap_to_prev(q, bprv, offset);
+ }
+ 
+ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+ 			 struct bio *next)
+ {
+-	if (!bio_has_data(prev))
++	if (!bio_has_data(prev) || !queue_virt_boundary(q))
+ 		return false;
+ 
+-	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+-				next->bi_io_vec[0].bv_offset);
++	return __bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
++				next->bi_io_vec[0].bv_offset);
+ }
+ 
+ static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
diff --git a/queue-4.4/block-get-the-1st-and-last-bvec-via-helpers.patch b/queue-4.4/block-get-the-1st-and-last-bvec-via-helpers.patch
new file mode 100644
index 00000000000..2b853711747
--- /dev/null
+++ b/queue-4.4/block-get-the-1st-and-last-bvec-via-helpers.patch
@@ -0,0 +1,47 @@
+From 25e71a99f10e444cd00bb2ebccb11e1c9fb672b1 Mon Sep 17 00:00:00 2001
+From: Ming Lei
+Date: Fri, 26 Feb 2016 23:40:52 +0800
+Subject: block: get the 1st and last bvec via helpers
+
+From: Ming Lei
+
+commit 25e71a99f10e444cd00bb2ebccb11e1c9fb672b1 upstream.
+
+This patch applies the two introduced helpers to
+figure out the 1st and last bvec, and fixes the
+original way after bio splitting.
+
+Reported-by: Sagi Grimberg
+Reviewed-by: Sagi Grimberg
+Reviewed-by: Christoph Hellwig
+Signed-off-by: Ming Lei
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/blkdev.h |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1389,11 +1389,16 @@ static inline bool bvec_gap_to_prev(stru
+ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+ 			 struct bio *next)
+ {
+-	if (!bio_has_data(prev) || !queue_virt_boundary(q))
+-		return false;
++	if (bio_has_data(prev) && queue_virt_boundary(q)) {
++		struct bio_vec pb, nb;
+ 
+-	return __bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+-				next->bi_io_vec[0].bv_offset);
++		bio_get_last_bvec(prev, &pb);
++		bio_get_first_bvec(next, &nb);
++
++		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
++	}
++
++	return false;
+ }
+ 
+ static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
diff --git a/queue-4.4/drm-amdgpu-use-drm_calloc_large-for-vm-page_tables-array.patch b/queue-4.4/drm-amdgpu-use-drm_calloc_large-for-vm-page_tables-array.patch
new file mode 100644
index 00000000000..647e5f352c5
--- /dev/null
+++ b/queue-4.4/drm-amdgpu-use-drm_calloc_large-for-vm-page_tables-array.patch
@@ -0,0 +1,56 @@
+From 9571e1d84042f5670df9fabdcbe7dd5da3abe43e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?=
+Date: Tue, 19 Jan 2016 17:59:46 +0900
+Subject: drm/amdgpu: Use drm_calloc_large for VM page_tables array
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer
+
+commit 9571e1d84042f5670df9fabdcbe7dd5da3abe43e upstream.
+
+It can be big, depending on the VM address space size, which is tunable
+via the vm_size module parameter.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93721
+Reviewed-by: Christian König
+Signed-off-by: Michel Dänzer
+Signed-off-by: Alex Deucher
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device
+ {
+ 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+ 		AMDGPU_VM_PTE_COUNT * 8);
+-	unsigned pd_size, pd_entries, pts_size;
++	unsigned pd_size, pd_entries;
+ 	int i, r;
+ 
+ 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+@@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device
+ 	pd_entries = amdgpu_vm_num_pdes(adev);
+ 
+ 	/* allocate page table array */
+-	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
+-	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
++	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
+ 	if (vm->page_tables == NULL) {
+ 		DRM_ERROR("Cannot allocate memory for page table array\n");
+ 		return -ENOMEM;
+@@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device
+ 
+ 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+ 		amdgpu_bo_unref(&vm->page_tables[i].bo);
+-	kfree(vm->page_tables);
++	drm_free_large(vm->page_tables);
+ 
+ 	amdgpu_bo_unref(&vm->page_directory);
+ 	fence_put(vm->page_directory_fence);
diff --git a/queue-4.4/series b/queue-4.4/series
index a5dad2946bc..92afe1f32b4 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -65,3 +65,6 @@ cxl-fix-psl-timebase-synchronization-detection.patch
 ubi-fix-out-of-bounds-write-in-volume-update-code.patch
 i2c-brcmstb-allocate-correct-amount-of-memory-for-regmap.patch
 thermal-cpu_cooling-fix-out-of-bounds-access-in-time_in_idle.patch
+drm-amdgpu-use-drm_calloc_large-for-vm-page_tables-array.patch
+block-check-virt-boundary-in-bio_will_gap.patch
+block-get-the-1st-and-last-bvec-via-helpers.patch
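
Note on the two block patches above: the whole decision rests on the queue's virt boundary mask. Two adjacent segments "gap" (and therefore cannot share one scatter-gather entry) when the next segment starts at a non-zero offset or the previous segment does not end exactly on the boundary, which is what __bvec_gap_to_prev() encodes. The snippet below is a minimal userspace sketch of that test, not kernel code; struct seg, seg_gap_to_prev() and the 4 KiB mask are made-up stand-ins for struct bio_vec, __bvec_gap_to_prev() and queue_virt_boundary(q).

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct bio_vec: where a segment starts in its page, and its length. */
struct seg {
	unsigned int offset;
	unsigned int len;
};

/*
 * Mirrors the logic of __bvec_gap_to_prev(): with a virt boundary mask
 * (e.g. 0xfff for a 4 KiB boundary), the pair gaps unless the previous
 * segment ends exactly on the boundary and the next one starts at offset 0.
 */
static bool seg_gap_to_prev(unsigned int boundary_mask, const struct seg *prev,
			    unsigned int next_offset)
{
	if (!boundary_mask)	/* no virt boundary limit: never a gap */
		return false;
	return next_offset || ((prev->offset + prev->len) & boundary_mask);
}

int main(void)
{
	struct seg full_page   = { .offset = 0, .len = 4096 };
	struct seg half_sector = { .offset = 0, .len = 512 };

	printf("%d\n", seg_gap_to_prev(0xfff, &full_page, 0));   /* 0: mergeable */
	printf("%d\n", seg_gap_to_prev(0xfff, &half_sector, 0)); /* 1: gap */
	printf("%d\n", seg_gap_to_prev(0xfff, &full_page, 64));  /* 1: gap */
	return 0;
}

Per the commit messages, the first block patch only hoists the !queue_virt_boundary(q) early return into bio_will_gap() so that the more expensive last-bvec lookup introduced by the second patch is skipped entirely on devices without a virt boundary limit, which is most of them.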
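Note on the amdgpu patch above: vm->page_tables is a single metadata array whose length scales with the VM address space (the vm_size module parameter), and kzalloc() requires physically contiguous memory, so a large array can fail to allocate. drm_calloc_large() keeps kcalloc() for small requests and, roughly speaking, falls back to vmalloc() for big ones, with drm_free_large() as the matching free. The sketch below is only a userspace analogue of that pattern; calloc_large(), free_large() and the 16 KiB threshold are invented for the demo, and unlike the real DRM helper the free side here needs the element count and size passed back in.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define LARGE_THRESHOLD (16 * 1024)	/* arbitrary cut-off for this demo */

/* Zeroed array allocation: plain calloc() for small arrays, anonymous mmap() for big ones. */
static void *calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)	/* overflow check */
		return NULL;
	size_t bytes = nmemb * size;

	if (bytes <= LARGE_THRESHOLD)
		return calloc(nmemb, size);

	void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	/* anonymous mmap is already zeroed */
	return p == MAP_FAILED ? NULL : p;
}

static void free_large(void *p, size_t nmemb, size_t size)
{
	size_t bytes = nmemb * size;

	if (!p)
		return;
	if (bytes <= LARGE_THRESHOLD)
		free(p);
	else
		munmap(p, bytes);
}

int main(void)
{
	/* A stand-in for the per-VM page table bookkeeping array. */
	struct pt_entry { void *bo; unsigned long addr; };
	size_t entries = 1 << 20;	/* large address space -> large array */

	struct pt_entry *tables = calloc_large(entries, sizeof(*tables));
	if (!tables)
		return 1;
	printf("allocated %zu bytes, entry 0 is zeroed: %p\n",
	       entries * sizeof(*tables), tables[0].bo);
	free_large(tables, entries, sizeof(*tables));
	return 0;
}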