--- /dev/null
+From 1345921393ba23b60d3fcf15933e699232ad25ae Mon Sep 17 00:00:00 2001
+From: Jason Yan <yanaijie@huawei.com>
+Date: Fri, 10 Mar 2017 11:49:12 +0800
+Subject: md: fix incorrect use of lexx_to_cpu in does_sb_need_changing
+
+From: Jason Yan <yanaijie@huawei.com>
+
+commit 1345921393ba23b60d3fcf15933e699232ad25ae upstream.
+
+The sb->layout is of type __le32, so we should use le32_to_cpu.
+
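+As a minimal userspace sketch of why the conversion width matters (the
+helpers are open-coded here; on little-endian hosts the kernel's
+le*_to_cpu are no-ops, which is why the bug was invisible there):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	/* What le32_to_cpu/le64_to_cpu boil down to on a big-endian host. */
+	static uint32_t swab32(uint32_t x)
+	{
+		return (x >> 24) | ((x >> 8) & 0xff00) |
+		       ((x << 8) & 0xff0000) | (x << 24);
+	}
+
+	static uint64_t swab64(uint64_t x)
+	{
+		return ((uint64_t)swab32((uint32_t)x) << 32) |
+		       swab32((uint32_t)(x >> 32));
+	}
+
+	int main(void)
+	{
+		/* layout == 2 on disk (LE bytes 02 00 00 00) is read as
+		 * the raw word 0x02000000 on a big-endian CPU. */
+		uint32_t raw = 0x02000000;
+
+		printf("%#x\n", swab32(raw));	/* 0x2: correct */
+		printf("%#llx\n",		/* 0x200000000: bogus */
+		       (unsigned long long)swab64(raw));
+		return 0;
+	}
+
+So on big-endian machines the 64-bit conversion turned a matching
+layout into a mismatch, making does_sb_need_changing() report a
+spurious change.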
+Signed-off-by: Jason Yan <yanaijie@huawei.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct
+ /* Check if any mddev parameters have changed */
+ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
+ (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
+- (mddev->layout != le64_to_cpu(sb->layout)) ||
++ (mddev->layout != le32_to_cpu(sb->layout)) ||
+ (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
+ (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
+ return true;
--- /dev/null
+From 3fb632e40d7667d8bedfabc28850ac06d5493f54 Mon Sep 17 00:00:00 2001
+From: Jason Yan <yanaijie@huawei.com>
+Date: Fri, 10 Mar 2017 11:27:23 +0800
+Subject: md: fix super_offset endianness in super_1_rdev_size_change
+
+From: Jason Yan <yanaijie@huawei.com>
+
+commit 3fb632e40d7667d8bedfabc28850ac06d5493f54 upstream.
+
+The sb->super_offset is of type __le64 and so should be little-endian,
+but rdev->sb_start is in host byte order; fix this by converting with
+cpu_to_le64.
+
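+A userspace sketch of the difference (cpu_to_le64_sketch is a
+hypothetical portable stand-in; the point is only which bytes end up
+on disk):
+
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	/* Lay the value out least-significant byte first, whatever
+	 * the host byte order, as cpu_to_le64 does. */
+	static uint64_t cpu_to_le64_sketch(uint64_t v)
+	{
+		unsigned char b[8];
+		uint64_t out;
+		int i;
+
+		for (i = 0; i < 8; i++)
+			b[i] = v >> (8 * i);
+		memcpy(&out, b, 8);
+		return out;
+	}
+
+	int main(void)
+	{
+		uint64_t sb_start = 8;	/* host byte order */
+		uint64_t super_offset = cpu_to_le64_sketch(sb_start);
+		unsigned char b[8];
+		int i;
+
+		/* Bytes that would hit the disk: 08 00 00 00 00 00 00 00
+		 * on any host.  The old unconverted assignment got this
+		 * right only on little-endian machines. */
+		memcpy(b, &super_offset, 8);
+		for (i = 0; i < 8; i++)
+			printf("%02x ", b[i]);
+		printf("\n");
+		return 0;
+	}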
+Signed-off-by: Jason Yan <yanaijie@huawei.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1866,7 +1866,7 @@ super_1_rdev_size_change(struct md_rdev
+ }
+ sb = page_address(rdev->sb_page);
+ sb->data_size = cpu_to_le64(num_sectors);
+- sb->super_offset = rdev->sb_start;
++ sb->super_offset = cpu_to_le64(rdev->sb_start);
+ sb->sb_csum = calc_sb_1_csum(sb);
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
--- /dev/null
+From d88270eef4b56bd7973841dd1fed387ccfa83709 Mon Sep 17 00:00:00 2001
+From: Neal Cardwell <ncardwell@google.com>
+Date: Mon, 25 Jan 2016 14:01:53 -0800
+Subject: tcp: fix tcp_mark_head_lost to check skb len before fragmenting
+
+From: Neal Cardwell <ncardwell@google.com>
+
+commit d88270eef4b56bd7973841dd1fed387ccfa83709 upstream.
+
+This commit fixes a corner case in tcp_mark_head_lost() which was
+causing the WARN_ON(len > skb->len) in tcp_fragment() to fire.
+
+tcp_mark_head_lost() was assuming that if a packet has
+tcp_skb_pcount(skb) of N, then it's safe to fragment off a prefix of
+M*mss bytes, for any M < N. But with the tricky way TCP pcounts are
+maintained, this is not always true.
+
+For example, suppose the sender sends 4 1-byte packets and has the
+last 3 packets SACKed. It will merge the last 3 packets in the write
+queue into an skb with pcount = 3 and len = 3 bytes. If another
+recovery happens after a SACK reneging event, tcp_mark_head_lost()
+may attempt to split the skb, assuming it has more than 2*MSS bytes.
+
+This sounds very counterintuitive, but as the commit description for
+the related commit c0638c247f55 ("tcp: don't fragment SACKed skbs in
+tcp_mark_head_lost()") notes, this is because tcp_shifted_skb()
+coalesces adjacent regions of SACKed skbs, and when doing this it
+preserves the sum of their packet counts in order to reflect the
+real-world dynamics on the wire. The c0638c247f55 commit tried to
+avoid problems by not fragmenting SACKed skbs, since SACKed skbs are
+where the non-proportionality between pcount and skb->len/mss is known
+to be possible. However, that commit did not handle the case where
+during a reneging event one of these weird SACKed skbs becomes an
+un-SACKed skb, which tcp_mark_head_lost() can then try to fragment.
+
+The fix is to simply mark the entire skb lost when this happens.
+This makes the recovery slightly more aggressive in such corner
+cases before we detect reordering. But once reordering is detected,
+this code path is bypassed because FACK is disabled.
+
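+A sketch of the arithmetic with the numbers from the example above
+(illustrative values only, not the kernel code):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int skb_len = 3;    /* 3 coalesced 1-byte segments */
+		unsigned int mss     = 1448; /* tcp_skb_mss(skb), say       */
+		unsigned int packets = 2;    /* packets to mark lost        */
+		unsigned int oldcnt  = 1;    /* pcount seen before this skb */
+		unsigned int lost    = (packets - oldcnt) * mss; /* 1448 */
+
+		if (lost < skb_len)
+			printf("fragment off a %u-byte prefix\n", lost);
+		else	/* the old code called tcp_fragment() here with
+			 * lost > skb->len, tripping WARN_ON(len > skb->len);
+			 * the fix marks the whole skb lost instead. */
+			printf("mark all %u bytes lost\n", skb_len);
+		return 0;
+	}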
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Vinson Lee <vlee@freedesktop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/tcp_input.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2165,8 +2165,7 @@ static void tcp_mark_head_lost(struct so
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+- int cnt, oldcnt;
+- int err;
++ int cnt, oldcnt, lost;
+ unsigned int mss;
+ /* Use SACK to deduce losses of new sequences sent during recovery */
+ const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
+@@ -2206,9 +2205,10 @@ static void tcp_mark_head_lost(struct so
+ break;
+
+ mss = tcp_skb_mss(skb);
+- err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
+- mss, GFP_ATOMIC);
+- if (err < 0)
++ /* If needed, chop off the prefix to mark as lost. */
++ lost = (packets - oldcnt) * mss;
++ if (lost < skb->len &&
++ tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
+ break;
+ cnt = packets;
+ }