--- /dev/null
+From 4a2f704eb2d831a2d73d7f4cdd54f45c49c3c353 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Sat, 11 Jan 2020 20:57:43 +0800
+Subject: block: fix get_max_segment_size() overflow on 32bit arch
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 4a2f704eb2d831a2d73d7f4cdd54f45c49c3c353 upstream.
+
+Commit 429120f3df2d started taking the segment's start dma address into
+account when computing the max segment size, and used 'unsigned long' as
+the data type for that calculation. However, the segment boundary mask may
+be 0xffffffff, so the computed segment size can overflow in the case of a
+zero physical address on a 32bit arch.
+
+Fix the issue by returning queue_max_segment_size() directly when that
+happens.
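+
+A minimal userspace sketch of the wraparound (illustration only, not part
+of the patch; uint32_t stands in for a 32bit 'unsigned long' and the mask
+and offset values are hypothetical):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* queue_segment_boundary() may return 0xffffffff on 32bit */
+		uint32_t mask = 0xffffffffU;
+		/* mask & (page_to_phys(start_page) + offset) came out as 0 */
+		uint32_t offset = 0;
+
+		/* old code: 0xffffffff - 0 + 1 wraps to 0 in 32 bits ... */
+		uint32_t size = mask - offset + 1;
+
+		/* ... so min_t(unsigned long, size, max_seg) picked 0 */
+		printf("computed segment size: %u\n", (unsigned)size);
+		return 0;
+	}
+
+min_not_zero() skips a zero first argument, so the function falls back to
+queue_max_segment_size(q) when the subtraction wraps.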
+
+Fixes: 429120f3df2d ("block: fix splitting segments on boundary masks")
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Cc: Christoph Hellwig <hch@lst.de>
+Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-merge.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -164,8 +164,13 @@ static inline unsigned get_max_segment_s
+ unsigned long mask = queue_segment_boundary(q);
+
+ offset = mask & (page_to_phys(start_page) + offset);
+- return min_t(unsigned long, mask - offset + 1,
+- queue_max_segment_size(q));
++
++ /*
++ * overflow may be triggered in case of zero page physical address
++ * on 32bit arch, use queue's max segment size when that happens.
++ */
++ return min_not_zero(mask - offset + 1,
++ (unsigned long)queue_max_segment_size(q));
+ }
+
+ /**
--- /dev/null
+From 429120f3df2dba2bf3a4a19f4212a53ecefc7102 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Sun, 29 Dec 2019 10:32:30 +0800
+Subject: block: fix splitting segments on boundary masks
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit 429120f3df2dba2bf3a4a19f4212a53ecefc7102 upstream.
+
+We ran into a problem with an mpt3sas based controller, where we would
+see random (and hard to reproduce) file corruption. The issue seemed
+specific to this controller, but wasn't specific to the file system.
+After a lot of debugging, we found out that it was caused by segments
+spanning a 4G memory boundary. This shouldn't happen, as the default
+setting for the segment boundary mask is 4G.
+
+Turns out there are two issues in get_max_segment_size():
+
+1) The default segment boundary mask is bypassed
+
+2) The segment start address isn't taken into account when checking the
+   segment boundary limit
+
+Fix these two issues by no longer bypassing the segment boundary check
+when the mask is set to the default value, and by taking the actual start
+address of the request into account when checking whether a segment needs
+splitting.
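+
+A minimal userspace sketch of issue 2 (illustration only, not part of the
+patch; the numbers are hypothetical and 'phys' stands in for
+page_to_phys(bv->bv_page)):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long mask = 0xffffffffULL; /* default 4G boundary */
+		unsigned long long phys = 0xfffff000ULL; /* page 4K below 4G */
+		unsigned long long bv_offset = 0;
+
+		/* old check: only the offset within the bvec was considered */
+		unsigned long long old_max = mask - (mask & bv_offset) + 1;
+
+		/* new check: fold in the page's physical address as well */
+		unsigned long long off = mask & (phys + bv_offset);
+		unsigned long long new_max = mask - off + 1;
+
+		/* old: 0x100000000 (no split), new: 0x1000 (split at 4G) */
+		printf("old %#llx new %#llx\n", old_max, new_max);
+		return 0;
+	}
+
+So an 8K segment starting 4K below the 4G line is now capped at 4K and
+split at the boundary instead of being handed to the controller in one
+piece (issue 1 is the separate bypass of this check when the mask equals
+the default BLK_SEG_BOUNDARY_MASK).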
+
+Cc: stable@vger.kernel.org # v5.1+
+Reviewed-by: Chris Mason <clm@fb.com>
+Tested-by: Chris Mason <clm@fb.com>
+Fixes: dcebd755926b ("block: use bio_for_each_bvec() to compute multi-page bvec count")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Dropped const on the page pointer; ppc page_to_phys() doesn't mark the
+page as const...
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+
+---
+ block/blk-merge.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(s
+ return sectors & (lbs - 1);
+ }
+
+-static unsigned get_max_segment_size(const struct request_queue *q,
+- unsigned offset)
++static inline unsigned get_max_segment_size(const struct request_queue *q,
++ struct page *start_page,
++ unsigned long offset)
+ {
+ unsigned long mask = queue_segment_boundary(q);
+
+- /* default segment boundary mask means no boundary limit */
+- if (mask == BLK_SEG_BOUNDARY_MASK)
+- return queue_max_segment_size(q);
+-
+- return min_t(unsigned long, mask - (mask & offset) + 1,
++ offset = mask & (page_to_phys(start_page) + offset);
++ return min_t(unsigned long, mask - offset + 1,
+ queue_max_segment_size(q));
+ }
+
+@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct
+ unsigned seg_size = 0;
+
+ while (len && *nsegs < max_segs) {
+- seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
++ seg_size = get_max_segment_size(q, bv->bv_page,
++ bv->bv_offset + total_len);
+ seg_size = min(seg_size, len);
+
+ (*nsegs)++;
+@@ -404,7 +403,8 @@ static unsigned blk_bvec_map_sg(struct r
+
+ while (nbytes > 0) {
+ unsigned offset = bvec->bv_offset + total;
+- unsigned len = min(get_max_segment_size(q, offset), nbytes);
++ unsigned len = min(get_max_segment_size(q, bvec->bv_page,
++ offset), nbytes);
+ struct page *page = bvec->bv_page;
+
+ /*