--- /dev/null
+From 7a0c5c5b834fb60764b494b0e39c239da3b0774b Mon Sep 17 00:00:00 2001
+From: Dmitry Bilunov <kmeaw@yandex-team.ru>
+Date: Thu, 30 Mar 2017 18:14:26 +0300
+Subject: dm raid: fix NULL pointer dereference for raid1 without bitmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dmitry Bilunov <kmeaw@yandex-team.ru>
+
+commit 7a0c5c5b834fb60764b494b0e39c239da3b0774b upstream.
+
+Commit 4257e08 ("dm raid: support to change bitmap region size")
+introduced a bitmap resize call during the preresume phase. A user can
+create a DM device with the "raid" target configured as raid1 with no
+metadata devices to hold superblock/bitmap info. This can be achieved
+using the following sequence:
+
+ truncate -s 32M /dev/shm/raid-test
+ LOOP=$(losetup --show -f /dev/shm/raid-test)
+ dmsetup create raid-test-linear0 --table "0 1024 linear $LOOP 0"
+ dmsetup create raid-test-linear1 --table "0 1024 linear $LOOP 1024"
+ dmsetup create raid-test --table "0 1024 raid raid1 1 2048 2 - /dev/mapper/raid-test-linear0 - /dev/mapper/raid-test-linear1"
+
+This results in the following crash:
+
+[ 4029.110216] device-mapper: raid: Ignoring chunk size parameter for RAID 1
+[ 4029.110217] device-mapper: raid: Choosing default region size of 4MiB
+[ 4029.111349] md/raid1:mdX: active with 2 out of 2 mirrors
+[ 4029.114770] BUG: unable to handle kernel NULL pointer dereference at 0000000000000030
+[ 4029.114802] IP: bitmap_resize+0x25/0x7c0 [md_mod]
+[ 4029.114816] PGD 0
+…
+[ 4029.115059] Hardware name: Aquarius Pro P30 S85 BUY-866/B85M-E, BIOS 2304 05/25/2015
+[ 4029.115079] task: ffff88015cc29a80 task.stack: ffffc90001a5c000
+[ 4029.115097] RIP: 0010:bitmap_resize+0x25/0x7c0 [md_mod]
+[ 4029.115112] RSP: 0018:ffffc90001a5fb68 EFLAGS: 00010246
+[ 4029.115127] RAX: 0000000000000005 RBX: 0000000000000000 RCX: 0000000000000000
+[ 4029.115146] RDX: 0000000000000000 RSI: 0000000000000400 RDI: 0000000000000000
+[ 4029.115166] RBP: ffffc90001a5fc28 R08: 0000000800000000 R09: 00000008ffffffff
+[ 4029.115185] R10: ffffea0005661600 R11: ffff88015cc29a80 R12: ffff88021231f058
+[ 4029.115204] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+[ 4029.115223] FS: 00007fe73a6b4740(0000) GS:ffff88021ea80000(0000) knlGS:0000000000000000
+[ 4029.115245] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 4029.115261] CR2: 0000000000000030 CR3: 0000000159a74000 CR4: 00000000001426e0
+[ 4029.115281] Call Trace:
+[ 4029.115291] ? raid_iterate_devices+0x63/0x80 [dm_raid]
+[ 4029.115309] ? dm_table_all_devices_attribute.isra.23+0x41/0x70 [dm_mod]
+[ 4029.115329] ? dm_table_set_restrictions+0x225/0x2d0 [dm_mod]
+[ 4029.115346] raid_preresume+0x81/0x2e0 [dm_raid]
+[ 4029.115361] dm_table_resume_targets+0x47/0xe0 [dm_mod]
+[ 4029.115378] dm_resume+0xa8/0xd0 [dm_mod]
+[ 4029.115391] dev_suspend+0x123/0x250 [dm_mod]
+[ 4029.115405] ? table_load+0x350/0x350 [dm_mod]
+[ 4029.115419] ctl_ioctl+0x1c2/0x490 [dm_mod]
+[ 4029.115433] dm_ctl_ioctl+0xe/0x20 [dm_mod]
+[ 4029.115447] do_vfs_ioctl+0x8d/0x5a0
+[ 4029.115459] ? ____fput+0x9/0x10
+[ 4029.115470] ? task_work_run+0x79/0xa0
+[ 4029.115481] SyS_ioctl+0x3c/0x70
+[ 4029.115493] entry_SYSCALL_64_fastpath+0x13/0x94
+
+The raid_preresume() function incorrectly assumes that the raid_set has
+a bitmap enabled if RT_FLAG_RS_BITMAP_LOADED is set. But
+RT_FLAG_RS_BITMAP_LOADED is getting set in __load_dirty_region_bitmap()
+even if there is no bitmap present (and bitmap_load() happily returns 0
+even if a bitmap isn't present). So the only way forward in the
+near-term is to check if the bitmap is present by seeing if
+mddev->bitmap is not NULL after bitmap_load() has been called.
+
+Doing so avoids the NULL pointer dereference shown above.
+
+Fixes: 4257e08 ("dm raid: support to change bitmap region size")
+Signed-off-by: Dmitry Bilunov <kmeaw@yandex-team.ru>
+Signed-off-by: Andrey Smetanin <asmetanin@yandex-team.ru>
+Acked-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-raid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3594,7 +3594,7 @@ static int raid_preresume(struct dm_targ
+ return r;
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
+- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
++ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
+ r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+ to_bytes(rs->requested_bitmap_chunk_sectors), 0);
--- /dev/null
+From 2958b9013fcbabeeba221161d0712f5259f1e15d Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Fri, 17 Mar 2017 12:11:20 +0800
+Subject: drm/i915/gvt: Fix gvt scheduler interval time
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 2958b9013fcbabeeba221161d0712f5259f1e15d upstream.
+
+Correctly assign 1ms as the gvt scheduler interval time, as the
+previous expression based on HZ was broken. Also start the gvt
+scheduler with no delay instead of waiting a full period.
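+
+For illustration only (HZ=250 is assumed here and is not part of this
+patch): the old definition relied on C integer division, so any HZ
+below 1000 truncated the time slice to zero jiffies, while the new one
+rounds up to at least one jiffy:
+
+	#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)       /* 1*250/1000 == 0 */
+	#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) /* always >= 1 jiffy */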
+
+Fixes: 4b63960ebd3f ("drm/i915/gvt: vGPU schedule policy framework")
+Cc: Zhi Wang <zhi.a.wang@intel.com>
+Acked-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/sched_policy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
++++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
+@@ -101,7 +101,7 @@ struct tbs_sched_data {
+ struct list_head runq_head;
+ };
+
+-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
++#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
+
+ static void tbs_sched_func(struct work_struct *work)
+ {
+@@ -224,7 +224,7 @@ static void tbs_sched_start_schedule(str
+ return;
+
+ list_add_tail(&vgpu_data->list, &sched_data->runq_head);
+- schedule_delayed_work(&sched_data->work, sched_data->period);
++ schedule_delayed_work(&sched_data->work, 0);
+ }
+
+ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
--- /dev/null
+From 5180edc2421117766fcb9c2d2dc6bfaeefdeb709 Mon Sep 17 00:00:00 2001
+From: Changbin Du <changbin.du@intel.com>
+Date: Thu, 16 Mar 2017 09:45:09 +0800
+Subject: drm/i915/kvmgt: fix suspicious rcu dereference usage
+
+From: Changbin Du <changbin.du@intel.com>
+
+commit 5180edc2421117766fcb9c2d2dc6bfaeefdeb709 upstream.
+
+The srcu read lock must be held while accessing kvm memslots.
+This patch fixes the warning below for kvmgt_rw_gpa().
+
+[ 165.345093] [ ERR: suspicious RCU usage. ]
+[ 165.416538] Call Trace:
+[ 165.418989] dump_stack+0x85/0xc2
+[ 165.422310] lockdep_rcu_suspicious+0xd7/0x110
+[ 165.426769] kvm_read_guest_page+0x195/0x1b0 [kvm]
+[ 165.431574] kvm_read_guest+0x50/0x90 [kvm]
+[ 165.440492] kvmgt_rw_gpa+0x43/0xa0 [kvmgt]
+[ 165.444683] kvmgt_read_gpa+0x11/0x20 [kvmgt]
+[ 165.449061] gtt_get_entry64+0x4d/0xc0 [i915]
+[ 165.453438] ppgtt_populate_shadow_page_by_guest_entry+0x380/0xdc0 [i915]
+[ 165.460254] shadow_mm+0xd1/0x460 [i915]
+[ 165.472488] intel_vgpu_create_mm+0x1ab/0x210 [i915]
+[ 165.477472] intel_vgpu_g2v_create_ppgtt_mm+0x5f/0xc0 [i915]
+[ 165.483154] pvinfo_mmio_write+0x19b/0x1d0 [i915]
+[ 165.499068] intel_vgpu_emulate_mmio_write+0x3f9/0x600 [i915]
+[ 165.504827] intel_vgpu_rw+0x114/0x150 [kvmgt]
+[ 165.509281] intel_vgpu_write+0x16f/0x1a0 [kvmgt]
+[ 165.513993] vfio_mdev_write+0x20/0x30 [vfio_mdev]
+[ 165.518793] vfio_device_fops_write+0x24/0x30 [vfio]
+[ 165.523770] __vfs_write+0x28/0x120
+[ 165.540529] vfs_write+0xce/0x1f0
+
+v2: fix Cc format for stable
+
+Signed-off-by: Changbin Du <changbin.du@intel.com>
+Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
+Reviewed-by: Jike Song <jike.song@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/kvmgt.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -1422,7 +1422,7 @@ static int kvmgt_rw_gpa(unsigned long ha
+ {
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+- int ret;
++ int idx, ret;
+ bool kthread = current->mm == NULL;
+
+ if (!handle_valid(handle))
+@@ -1434,8 +1434,10 @@ static int kvmgt_rw_gpa(unsigned long ha
+ if (kthread)
+ use_mm(kvm->mm);
+
++ idx = srcu_read_lock(&kvm->srcu);
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
++ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (kthread)
+ unuse_mm(kvm->mm);
--- /dev/null
+From 280489daa68bd20364b322c11e3f429a0212c611 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 13 Mar 2017 17:43:48 +0100
+Subject: drm/msm: adreno: fix build error without debugfs
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 280489daa68bd20364b322c11e3f429a0212c611 upstream.
+
+The newly added a5xx support fails to build when debugfs is disabled:
+
+drivers/gpu/drm/msm/adreno/a5xx_gpu.c:849:4: error: 'struct msm_gpu_funcs' has no member named 'show'
+drivers/gpu/drm/msm/adreno/a5xx_gpu.c:849:11: error: 'a5xx_show' undeclared here (not in a function); did you mean 'a5xx_irq'?
+
+This adds a missing #ifdef.
+
+Fixes: b5f103ab98c7 ("drm/msm: gpu: Add A5XX target support")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -846,7 +846,9 @@ static const struct adreno_gpu_funcs fun
+ .idle = a5xx_idle,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
++#ifdef CONFIG_DEBUG_FS
+ .show = a5xx_show,
++#endif
+ },
+ .get_timestamp = a5xx_get_timestamp,
+ };
--- /dev/null
+From fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 13:35:01 +0100
+Subject: metag/usercopy: Add early abort to copy_to_user
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b upstream.
+
+When copying to userland on Meta, abort the copy immediately if any
+fault is encountered, instead of continuing on, repeatedly faulting,
+and, worse, potentially copying further bytes successfully to
+subsequent valid pages.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -538,23 +538,31 @@ unsigned long __copy_user(void __user *p
+ if ((unsigned long) src & 1) {
+ __asm_copy_to_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 1) {
+ /* Worst case - byte copy */
+ while (n > 0) {
+ __asm_copy_to_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ return retn + n;
+ }
+ }
+ if (((unsigned long) src & 2) && n >= 2) {
+ __asm_copy_to_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ return retn + n;
+ }
+ if ((unsigned long) dst & 2) {
+ /* Second worst case - word copy */
+ while (n >= 2) {
+ __asm_copy_to_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ return retn + n;
+ }
+ }
+
+@@ -569,6 +577,8 @@ unsigned long __copy_user(void __user *p
+ while (n >= 8) {
+ __asm_copy_to_user_8x64(dst, src, retn);
+ n -= 8;
++ if (retn)
++ return retn + n;
+ }
+ }
+ if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +591,8 @@ unsigned long __copy_user(void __user *p
+ while (n >= 8) {
+ __asm_copy_to_user_8x64(dst, src, retn);
+ n -= 8;
++ if (retn)
++ return retn + n;
+ }
+ }
+ #endif
+@@ -588,11 +600,15 @@ unsigned long __copy_user(void __user *p
+ while (n >= 16) {
+ __asm_copy_to_user_16(dst, src, retn);
+ n -= 16;
++ if (retn)
++ return retn + n;
+ }
+
+ while (n >= 4) {
+ __asm_copy_to_user_4(dst, src, retn);
+ n -= 4;
++ if (retn)
++ return retn + n;
+ }
+
+ switch (n) {
+@@ -609,6 +625,10 @@ unsigned long __copy_user(void __user *p
+ break;
+ }
+
++ /*
++ * If we get here, retn correctly reflects the number of failing
++ * bytes.
++ */
+ return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
--- /dev/null
+From b884a190afcecdbef34ca508ea5ee88bb7c77861 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 08:51:34 +0100
+Subject: metag/usercopy: Add missing fixups
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit b884a190afcecdbef34ca508ea5ee88bb7c77861 upstream.
+
+The rapf copy loops in the Meta usercopy code are missing some extable
+entries for HTP cores with unaligned access checking enabled, where
+faults occur on the instruction immediately after the faulting access.
+
+Add the fixup labels and extable entries for these cases so that corner
+case user copy failures don't cause kernel crashes.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 72 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 48 insertions(+), 24 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -259,27 +259,31 @@
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+- "SUB %3, %3, #32\n" \
+ "23:\n" \
+- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "SUB %3, %3, #32\n" \
+ "24:\n" \
++ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "25:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "26:\n" \
+ "SUB %3, %3, #32\n" \
+ "DCACHE [%1+#-64], D0Ar6\n" \
+ "BR $Lloop"id"\n" \
+ \
+ "MOV RAPF, %1\n" \
+- "25:\n" \
++ "27:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "26:\n" \
++ "28:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "29:\n" \
+ "SUB %3, %3, #32\n" \
+- "27:\n" \
++ "30:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "28:\n" \
++ "31:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "32:\n" \
+ "SUB %0, %0, #8\n" \
+- "29:\n" \
++ "33:\n" \
+ "SETL [%0++], D0.7, D1.7\n" \
+ "SUB %3, %3, #32\n" \
+ "1:" \
+@@ -311,7 +315,11 @@
+ " .long 26b,3b\n" \
+ " .long 27b,3b\n" \
+ " .long 28b,3b\n" \
+- " .long 29b,4b\n" \
++ " .long 29b,3b\n" \
++ " .long 30b,3b\n" \
++ " .long 31b,3b\n" \
++ " .long 32b,3b\n" \
++ " .long 33b,4b\n" \
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
+@@ -402,47 +410,55 @@
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+- "SUB %3, %3, #16\n" \
+ "23:\n" \
+- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "24:\n" \
+- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "SUB %3, %3, #16\n" \
+- "25:\n" \
++ "24:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "26:\n" \
++ "25:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "26:\n" \
+ "SUB %3, %3, #16\n" \
+ "27:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "29:\n" \
++ "SUB %3, %3, #16\n" \
++ "30:\n" \
++ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
++ "31:\n" \
++ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "32:\n" \
+ "SUB %3, %3, #16\n" \
+ "DCACHE [%1+#-64], D0Ar6\n" \
+ "BR $Lloop"id"\n" \
+ \
+ "MOV RAPF, %1\n" \
+- "29:\n" \
++ "33:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "30:\n" \
++ "34:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "35:\n" \
+ "SUB %3, %3, #16\n" \
+- "31:\n" \
++ "36:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "32:\n" \
++ "37:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "38:\n" \
+ "SUB %3, %3, #16\n" \
+- "33:\n" \
++ "39:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "34:\n" \
++ "40:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "41:\n" \
+ "SUB %3, %3, #16\n" \
+- "35:\n" \
++ "42:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+- "36:\n" \
++ "43:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
++ "44:\n" \
+ "SUB %0, %0, #4\n" \
+- "37:\n" \
++ "45:\n" \
+ "SETD [%0++], D0.7\n" \
+ "SUB %3, %3, #16\n" \
+ "1:" \
+@@ -482,7 +498,15 @@
+ " .long 34b,3b\n" \
+ " .long 35b,3b\n" \
+ " .long 36b,3b\n" \
+- " .long 37b,4b\n" \
++ " .long 37b,3b\n" \
++ " .long 38b,3b\n" \
++ " .long 39b,3b\n" \
++ " .long 40b,3b\n" \
++ " .long 41b,3b\n" \
++ " .long 42b,3b\n" \
++ " .long 43b,3b\n" \
++ " .long 44b,3b\n" \
++ " .long 45b,4b\n" \
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
--- /dev/null
+From ef62a2d81f73d9cddef14bc3d9097a57010d551c Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 10:37:44 +0100
+Subject: metag/usercopy: Drop unused macros
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit ef62a2d81f73d9cddef14bc3d9097a57010d551c upstream.
+
+Metag's lib/usercopy.c has a bunch of copy_from_user macros for larger
+copies between 5 and 16 bytes which are completely unused. Before fixing
+the zeroing, let's drop these macros so there is less to fix.
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 113 ----------------------------------------------
+ 1 file changed, 113 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -651,119 +651,6 @@ EXPORT_SYMBOL(__copy_user);
+ #define __asm_copy_from_user_4(to, from, ret) \
+ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+
+-#define __asm_copy_from_user_5(to, from, ret) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "4: SETB [%0++],D1Ar1\n", \
+- "5: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "4: SETW [%0++],D1Ar1\n" COPY, \
+- "5: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+- __asm_copy_from_user_6x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "6: SETB [%0++],D1Ar1\n", \
+- "7: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_4x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "4: SETD [%0++],D1Ar1\n" COPY, \
+- "5: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "6: SETB [%0++],D1Ar1\n", \
+- "7: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "6: SETW [%0++],D1Ar1\n" COPY, \
+- "7: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret) \
+- __asm_copy_from_user_10x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "8: SETB [%0++],D1Ar1\n", \
+- "9: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_8x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "6: SETD [%0++],D1Ar1\n" COPY, \
+- "7: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "8: SETB [%0++],D1Ar1\n", \
+- "9: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETW D1Ar1,[%1++]\n" \
+- "8: SETW [%0++],D1Ar1\n" COPY, \
+- "9: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
+- " .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+- __asm_copy_from_user_14x_cont(to, from, ret, \
+- " GETB D1Ar1,[%1++]\n" \
+- "10: SETB [%0++],D1Ar1\n", \
+- "11: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
+- " .long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+- __asm_copy_from_user_12x_cont(to, from, ret, \
+- " GETD D1Ar1,[%1++]\n" \
+- "8: SETD [%0++],D1Ar1\n" COPY, \
+- "9: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
+- " .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+ asm volatile ( \
--- /dev/null
+From 2257211942bbbf6c798ab70b487d7e62f7835a1a Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:23:18 +0100
+Subject: metag/usercopy: Fix alignment error checking
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2257211942bbbf6c798ab70b487d7e62f7835a1a upstream.
+
+Fix the error checking of the alignment adjustment code in
+raw_copy_from_user(), which mistakenly considers it safe to skip the
+error check when aligning the source buffer on a 2 or 4 byte boundary.
+
+If the destination buffer was unaligned it may have started to copy
+using byte or word accesses, which could well be at the start of a new
+(valid) source page. This would result in it appearing to have copied 1
+or 2 bytes at the end of the first (invalid) page rather than none at
+all.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -717,6 +717,8 @@ unsigned long __copy_user_zeroing(void *
+ if ((unsigned long) src & 1) {
+ __asm_copy_from_user_1(dst, src, retn);
+ n--;
++ if (retn)
++ goto copy_exception_bytes;
+ }
+ if ((unsigned long) dst & 1) {
+ /* Worst case - byte copy */
+@@ -730,6 +732,8 @@ unsigned long __copy_user_zeroing(void *
+ if (((unsigned long) src & 2) && n >= 2) {
+ __asm_copy_from_user_2(dst, src, retn);
+ n -= 2;
++ if (retn)
++ goto copy_exception_bytes;
+ }
+ if ((unsigned long) dst & 2) {
+ /* Second worst case - word copy */
+@@ -741,12 +745,6 @@ unsigned long __copy_user_zeroing(void *
+ }
+ }
+
+- /* We only need one check after the unalignment-adjustments,
+- because if both adjustments were done, either both or
+- neither reference had an exception. */
+- if (retn != 0)
+- goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+ /* 64 bit copy loop */
+ if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
--- /dev/null
+From 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Mon, 3 Apr 2017 17:41:40 +0100
+Subject: metag/usercopy: Fix src fixup in from user rapf loops
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 upstream.
+
+The fixup code to rewind the source pointer in
+__asm_copy_from_user_{32,64}bit_rapf_loop() always rewound the source by
+a single unit (4 or 8 bytes); however, this is insufficient if the fault
+didn't occur on the first load in the loop, as the source pointer will
+have been incremented but nothing will have been stored until all 4
+register [pairs] are loaded.
+
+Read the LSM_STEP field of TXSTATUS (which is already loaded into a
+register), a bit like the copy_to_user versions, to determine how many
+iterations of MGET[DL] have taken place, all of which need rewinding.
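+
+A rough C rendering of the 64-bit rewind (illustration only, with the
+field position taken from the comment added by this patch; this is not
+kernel code):
+
+	/* LSM_STEP is bits 10:8 of TXSTATUS, already held in D0Ar2 */
+	step = (txstatus >> 5) & 0x38;	/* LSM_STEP * 8 bytes */
+	if (step == 0)			/* fault on the 4th MGETL */
+		step = 32;		/* rewind all 4 iterations */
+	src -= step;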
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 36 ++++++++++++++++++++++++++++--------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -687,29 +687,49 @@ EXPORT_SYMBOL(__copy_user);
+ *
+ * Rationale:
+ * A fault occurs while reading from user buffer, which is the
+- * source. Since the fault is at a single address, we only
+- * need to rewind by 8 bytes.
++ * source.
+ * Since we don't write to kernel buffer until we read first,
+ * the kernel buffer is at the right state and needn't be
+- * corrected.
++ * corrected, but the source must be rewound to the beginning of
++ * the block, which is LSM_STEP*8 bytes.
++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ * and stored in D0Ar2
++ *
++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ * a fault happens at the 4th write, LSM_STEP will be 0
++ * instead of 4. The code copes with that.
+ */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
+ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
+- "SUB %1, %1, #8\n")
++ "LSR D0Ar2, D0Ar2, #5\n" \
++ "ANDS D0Ar2, D0Ar2, #0x38\n" \
++ "ADDZ D0Ar2, D0Ar2, #32\n" \
++ "SUB %1, %1, D0Ar2\n")
+
+ /* rewind 'from' pointer when a fault occurs
+ *
+ * Rationale:
+ * A fault occurs while reading from user buffer, which is the
+- * source. Since the fault is at a single address, we only
+- * need to rewind by 4 bytes.
++ * source.
+ * Since we don't write to kernel buffer until we read first,
+ * the kernel buffer is at the right state and needn't be
+- * corrected.
++ * corrected, but the source must be rewound to the beginning of
++ * the block, which is LSM_STEP*4 bytes.
++ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ * and stored in D0Ar2
++ *
++ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ * a fault happens at the 4th write, LSM_STEP will be 0
++ * instead of 4. The code copes with that.
+ */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
+ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
+- "SUB %1, %1, #4\n")
++ "LSR D0Ar2, D0Ar2, #6\n" \
++ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
++ "ADDZ D0Ar2, D0Ar2, #16\n" \
++ "SUB %1, %1, D0Ar2\n")
+
+
+ /*
--- /dev/null
+From fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 4 Apr 2017 11:43:26 +0100
+Subject: metag/usercopy: Set flags before ADDZ
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 upstream.
+
+The fixup code for the copy_to_user rapf loops reads TXStatus.LSM_STEP
+to decide how far to rewind the source pointer. There is a special case
+for the last execution of an MGETL/MGETD, since it leaves LSM_STEP=0
+even though the number of MGETLs/MGETDs attempted was 4. This uses ADDZ
+which is conditional upon the Z condition flag, but the AND instruction
+which masked the TXStatus.LSM_STEP field didn't set the condition flags
+based on the result.
+
+Fix that now by using ANDS which does set the flags, and also marking
+the condition codes as clobbered by the inline assembly.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/lib/usercopy.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -315,7 +315,7 @@
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
+- : "D1Ar1", "D0Ar2", "memory")
++ : "D1Ar1", "D0Ar2", "cc", "memory")
+
+ /* rewind 'to' and 'from' pointers when a fault occurs
+ *
+@@ -341,7 +341,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
+ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
+ "LSR D0Ar2, D0Ar2, #8\n" \
+- "AND D0Ar2, D0Ar2, #0x7\n" \
++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
+ "ADDZ D0Ar2, D0Ar2, #4\n" \
+ "SUB D0Ar2, D0Ar2, #1\n" \
+ "MOV D1Ar1, #4\n" \
+@@ -486,7 +486,7 @@
+ " .previous\n" \
+ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
+ : "0" (to), "1" (from), "2" (ret), "3" (n) \
+- : "D1Ar1", "D0Ar2", "memory")
++ : "D1Ar1", "D0Ar2", "cc", "memory")
+
+ /* rewind 'to' and 'from' pointers when a fault occurs
+ *
+@@ -512,7 +512,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
+ "LSR D0Ar2, D0Ar2, #8\n" \
+- "AND D0Ar2, D0Ar2, #0x7\n" \
++ "ANDS D0Ar2, D0Ar2, #0x7\n" \
+ "ADDZ D0Ar2, D0Ar2, #4\n" \
+ "SUB D0Ar2, D0Ar2, #1\n" \
+ "MOV D1Ar1, #4\n" \
--- /dev/null
+From 563ddc1076109f2b3f88e6d355eab7b6fd4662cb Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 31 Mar 2017 11:14:02 +0100
+Subject: metag/usercopy: Zero rest of buffer from copy_from_user
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 563ddc1076109f2b3f88e6d355eab7b6fd4662cb upstream.
+
+Currently we try to zero the destination for a failed read from userland
+in fixup code in the usercopy.c macros. The rest of the destination
+buffer is then zeroed from __copy_user_zeroing(), which is used for both
+copy_from_user() and __copy_from_user().
+
+Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
+before the fixup code entry labels, and __copy_from_user() shouldn't even
+be zeroing the rest of the buffer.
+
+Move the zeroing out into copy_from_user() and rename
+__copy_user_zeroing() to raw_copy_from_user() since it no longer does
+any zeroing. This also conveniently matches the name needed for
+RAW_COPY_USER support in a later patch.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/include/asm/uaccess.h | 15 +++++-----
+ arch/metag/lib/usercopy.c | 57 ++++++++++++---------------------------
+ 2 files changed, 26 insertions(+), 46 deletions(-)
+
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(co
+
+ #define strlen_user(str) strnlen_user(str, 32767)
+
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+- const void __user *from,
+- unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++ unsigned long n);
+
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+- return __copy_user_zeroing(to, from, n);
+- memset(to, 0, n);
+- return n;
++ res = raw_copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+
+ extern unsigned long __must_check __copy_user(void __user *to,
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+ COPY \
+ "1:\n" \
+ " .section .fixup,\"ax\"\n" \
+- " MOV D1Ar1,#0\n" \
+ FIXUP \
+ " MOVT D1Ar1,#HI(1b)\n" \
+ " JUMP D1Ar1,#LO(1b)\n" \
+@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
+ __asm_copy_user_cont(to, from, ret, \
+ " GETB D1Ar1,[%1++]\n" \
+ "2: SETB [%0++],D1Ar1\n", \
+- "3: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
++ "3: ADD %2,%2,#1\n", \
+ " .long 2b,3b\n")
+
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ __asm_copy_user_cont(to, from, ret, \
+ " GETW D1Ar1,[%1++]\n" \
+ "2: SETW [%0++],D1Ar1\n" COPY, \
+- "3: ADD %2,%2,#2\n" \
+- " SETW [%0++],D1Ar1\n" FIXUP, \
++ "3: ADD %2,%2,#2\n" FIXUP, \
+ " .long 2b,3b\n" TENTRY)
+
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
+ __asm_copy_from_user_2x_cont(to, from, ret, \
+ " GETB D1Ar1,[%1++]\n" \
+ "4: SETB [%0++],D1Ar1\n", \
+- "5: ADD %2,%2,#1\n" \
+- " SETB [%0++],D1Ar1\n", \
++ "5: ADD %2,%2,#1\n", \
+ " .long 4b,5b\n")
+
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+ __asm_copy_user_cont(to, from, ret, \
+ " GETD D1Ar1,[%1++]\n" \
+ "2: SETD [%0++],D1Ar1\n" COPY, \
+- "3: ADD %2,%2,#4\n" \
+- " SETD [%0++],D1Ar1\n" FIXUP, \
++ "3: ADD %2,%2,#4\n" FIXUP, \
+ " .long 2b,3b\n" TENTRY)
+
+ #define __asm_copy_from_user_4(to, from, ret) \
+ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+ asm volatile ( \
+ " GETL D0Ar2,D1Ar1,[%1++]\n" \
+ "2: SETL [%0++],D0Ar2,D1Ar1\n" \
+ "1:\n" \
+ " .section .fixup,\"ax\"\n" \
+- " MOV D1Ar1,#0\n" \
+- " MOV D0Ar2,#0\n" \
+ "3: ADD %2,%2,#8\n" \
+- " SETL [%0++],D0Ar2,D1Ar1\n" \
+ " MOVT D0Ar2,#HI(1b)\n" \
+ " JUMP D0Ar2,#LO(1b)\n" \
+ " .previous\n" \
+@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
+ "SUB %1, %1, #4\n")
+
+
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+- userland. The return-value is the number of bytes that were
+- inaccessible. */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+- unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++ unsigned long n)
+ {
+ register char *dst asm ("A0.2") = pdst;
+ register const char __user *src asm ("A1.2") = psrc;
+@@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *
+ __asm_copy_from_user_1(dst, src, retn);
+ n--;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ if ((unsigned long) dst & 1) {
+ /* Worst case - byte copy */
+@@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *
+ __asm_copy_from_user_1(dst, src, retn);
+ n--;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+ if (((unsigned long) src & 2) && n >= 2) {
+ __asm_copy_from_user_2(dst, src, retn);
+ n -= 2;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ if ((unsigned long) dst & 2) {
+ /* Second worst case - word copy */
+@@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *
+ __asm_copy_from_user_2(dst, src, retn);
+ n -= 2;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+
+@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *
+ __asm_copy_from_user_8x64(dst, src, retn);
+ n -= 8;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+
+@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *
+ __asm_copy_from_user_8x64(dst, src, retn);
+ n -= 8;
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+ }
+ #endif
+@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *
+ n -= 4;
+
+ if (retn)
+- goto copy_exception_bytes;
++ return retn + n;
+ }
+
+ /* If we get here, there were no memory read faults. */
+@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *
+ /* If we get here, retn correctly reflects the number of failing
+ bytes. */
+ return retn;
+-
+- copy_exception_bytes:
+- /* We already have "retn" bytes cleared, and need to clear the
+- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
+- memset is preferred here, since this isn't speed-critical code and
+- we'd rather have this a leaf-function than calling memset. */
+- {
+- char *endp;
+- for (endp = dst + n; dst < endp; dst++)
+- *dst = 0;
+- }
+-
+- return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+
+ #define __asm_clear_8x64(to, ret) \
+ asm volatile ( \
--- /dev/null
+From 033cffeedbd11c140952b98e8639bf652091a17d Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:25 +0800
+Subject: MIPS: Add MIPS_CPU_FTLB for Loongson-3A R2
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 033cffeedbd11c140952b98e8639bf652091a17d upstream.
+
+Loongson-3A R2 and newer CPUs have an FTLB, but Config0.MT is 1, so add
+MIPS_CPU_FTLB to the CPU options.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15752/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/cpu-probe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(st
+ }
+
+ decode_configs(c);
+- c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
++ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ break;
+ default:
--- /dev/null
+From 0be032c190abcdcfa948082b6a1e0d461184ba4d Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:29 +0800
+Subject: MIPS: c-r4k: Fix Loongson-3's vcache/scache waysize calculation
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 0be032c190abcdcfa948082b6a1e0d461184ba4d upstream.
+
+If scache.waysize is 0, r4k___flush_cache_all() will do nothing and
+thus cause bugs (see the sketch below). Although vcache.waysize isn't
+used anywhere yet, we also fix its calculation.
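+
+A sketch of why a zero waysize makes the flush a no-op (the loop shape
+is paraphrased from the index-based blast_*cache helpers, not copied
+from them):
+
+	end = start + c->scache.waysize;
+	for (addr = start; addr < end; addr += c->scache.linesz)
+		flush_line(addr);	/* waysize == 0: body never runs */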
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15756/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/c-r4k.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1558,6 +1558,7 @@ static void probe_vcache(void)
+ vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+ c->vcache.waybit = 0;
++ c->vcache.waysize = vcache_size / c->vcache.ways;
+
+ pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+ vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+@@ -1660,6 +1661,7 @@ static void __init loongson3_sc_init(voi
+ /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
+ scache_size *= 4;
+ c->scache.waybit = 0;
++ c->scache.waysize = scache_size / c->scache.ways;
+ pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+ scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+ if (scache_size)
--- /dev/null
+From 5a34133167dce36666ea054e30a561b7f4413b7f Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:26 +0800
+Subject: MIPS: Check TLB before handle_ri_rdhwr() for Loongson-3
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 5a34133167dce36666ea054e30a561b7f4413b7f upstream.
+
+Loongson-3's micro TLB (ITLB) is not strictly a subset of the JTLB.
+That means that when a JTLB entry is replaced by hardware, an old
+valid entry may still exist in the ITLB. So a TLB miss exception may
+occur while handle_ri_rdhwr() is running, because it tries to access
+the EPC's content. However, handle_ri_rdhwr() doesn't clear EXL, which
+makes a TLB Refill exception be treated as a TLB Invalid exception,
+and tlbp may fail. In this case, if the FTLB (which is usually
+set-associative rather than fully-associative) is enabled, a tlbp
+failure will cause an invalid tlbwi, which will hang the whole system.
+
+This patch renames handle_ri_rdhwr_vivt to handle_ri_rdhwr_tlbp and
+uses it for Loongson-3. It tries to solve the same problem described
+below, but more straightforwardly.
+
+https://patchwork.linux-mips.org/patch/12591/
+
+I think Loongson-2 has the same problem, but it has no FTLB, so we just
+keep it as is.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: Rui Wang <wangr@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15753/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/genex.S | 4 ++--
+ arch/mips/kernel/traps.c | 17 +++++++++++++----
+ 2 files changed, 15 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -448,7 +448,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
+ BUILD_HANDLER reserved reserved sti verbose /* others */
+
+ .align 5
+- LEAF(handle_ri_rdhwr_vivt)
++ LEAF(handle_ri_rdhwr_tlbp)
+ .set push
+ .set noat
+ .set noreorder
+@@ -467,7 +467,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+- END(handle_ri_rdhwr_vivt)
++ END(handle_ri_rdhwr_tlbp)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -81,7 +81,7 @@ extern asmlinkage void handle_dbe(void);
+ extern asmlinkage void handle_sys(void);
+ extern asmlinkage void handle_bp(void);
+ extern asmlinkage void handle_ri(void);
+-extern asmlinkage void handle_ri_rdhwr_vivt(void);
++extern asmlinkage void handle_ri_rdhwr_tlbp(void);
+ extern asmlinkage void handle_ri_rdhwr(void);
+ extern asmlinkage void handle_cpu(void);
+ extern asmlinkage void handle_ov(void);
+@@ -2352,9 +2352,18 @@ void __init trap_init(void)
+
+ set_except_vector(EXCCODE_SYS, handle_sys);
+ set_except_vector(EXCCODE_BP, handle_bp);
+- set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
+- (cpu_has_vtag_icache ?
+- handle_ri_rdhwr_vivt : handle_ri_rdhwr));
++
++ if (rdhwr_noopt)
++ set_except_vector(EXCCODE_RI, handle_ri);
++ else {
++ if (cpu_has_vtag_icache)
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
++ else if (current_cpu_type() == CPU_LOONGSON3)
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
++ else
++ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
++ }
++
+ set_except_vector(EXCCODE_CPU, handle_cpu);
+ set_except_vector(EXCCODE_OV, handle_ov);
+ set_except_vector(EXCCODE_TR, handle_tr);
--- /dev/null
+From 4b5347a24a0f2d3272032c120664b484478455de Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Thu, 23 Feb 2017 14:50:24 +0000
+Subject: MIPS: End spinlocks with .insn
+
+From: Paul Burton <paul.burton@imgtec.com>
+
+commit 4b5347a24a0f2d3272032c120664b484478455de upstream.
+
+When building for microMIPS we need to ensure that the assembler always
+knows that there is code at the target of a branch or jump. Recent
+toolchains will fail to link a microMIPS kernel when this isn't the case
+due to what it thinks is a branch to non-microMIPS code.
+
+mips-mti-linux-gnu-ld kernel/built-in.o: .spinlock.text+0x2fc: Unsupported branch between ISA modes.
+mips-mti-linux-gnu-ld final link failed: Bad value
+
+This is due to inline assembly labels in spinlock.h not being followed
+by an instruction mnemonic, either due to a .subsection pseudo-op or the
+end of the inline asm block.
+
+Fix this with a .insn directive after such labels.
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Maciej W. Rozycki <macro@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/15325/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/spinlock.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_s
+ " andi %[ticket], %[ticket], 0xffff \n"
+ " bne %[ticket], %[my_ticket], 4f \n"
+ " subu %[ticket], %[my_ticket], %[ticket] \n"
+- "2: \n"
++ "2: .insn \n"
+ " .subsection 2 \n"
+ "4: andi %[ticket], %[ticket], 0xffff \n"
+ " sll %[ticket], 5 \n"
+@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_try
+ " sc %[ticket], %[ticket_ptr] \n"
+ " beqz %[ticket], 1b \n"
+ " li %[ticket], 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ " .subsection 2 \n"
+ "3: b 2b \n"
+ " li %[ticket], 0 \n"
+@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch
+ " .set reorder \n"
+ __WEAK_LLSC_MB
+ " li %2, 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
+ : "memory");
+@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arc
+ " lui %1, 0x8000 \n"
+ " sc %1, %0 \n"
+ " li %2, 1 \n"
+- "2: \n"
++ "2: .insn \n"
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
+ "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
--- /dev/null
+From 0115f6cbf26663c86496bc56eeea293f85b77897 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:27 +0800
+Subject: MIPS: Flush wrong invalid FTLB entry for huge page
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 0115f6cbf26663c86496bc56eeea293f85b77897 upstream.
+
+On VTLB+FTLB platforms (such as Loongson-3A R2), the FTLB's pagesize is
+usually configured to be the same as PAGE_SIZE. In such a case, a huge
+page entry is not suitable to be written into the FTLB.
+
+Unfortunately, when a huge page is created, its page table entries are
+not created immediately. The TLB refill handler will then fetch an
+invalid page table entry which has no "HUGE" bit, and this entry may be
+written to the FTLB. Since it is invalid, the TLB load/store handler
+will then use tlbwi to write the valid entry at the same place. However,
+the valid entry is a huge page entry, which isn't suitable for the FTLB.
+
+Our solution is to modify build_huge_handler_tail: flush the invalid
+old entry (whether it is in the FTLB or the VTLB; this is done to
+reduce branches) and use tlbwr to write the valid new entry.
+
+Signed-off-by: Rui Wang <wangr@lemote.com>
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15754/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/tlbex.c | 25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -762,7 +762,8 @@ static void build_huge_update_entries(u3
+ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ struct uasm_label **l,
+ unsigned int pte,
+- unsigned int ptr)
++ unsigned int ptr,
++ unsigned int flush)
+ {
+ #ifdef CONFIG_SMP
+ UASM_i_SC(p, pte, 0, ptr);
+@@ -771,6 +772,22 @@ static void build_huge_handler_tail(u32
+ #else
+ UASM_i_SW(p, pte, 0, ptr);
+ #endif
++ if (cpu_has_ftlb && flush) {
++ BUG_ON(!cpu_has_tlbinv);
++
++ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
++ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++ build_tlb_write_entry(p, l, r, tlb_indexed);
++
++ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++ build_huge_update_entries(p, pte, ptr);
++ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
++
++ return;
++ }
++
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+ }
+@@ -2197,7 +2214,7 @@ static void build_r4000_tlb_load_handler
+ uasm_l_tlbl_goaround2(&l, p);
+ }
+ uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+
+ uasm_l_nopage_tlbl(&l, p);
+@@ -2252,7 +2269,7 @@ static void build_r4000_tlb_store_handle
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+
+ uasm_l_nopage_tlbs(&l, p);
+@@ -2308,7 +2325,7 @@ static void build_r4000_tlb_modify_handl
+ build_tlb_probe_entry(&p);
+ uasm_i_ori(&p, wr.r1, wr.r1,
+ _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+ #endif
+
+ uasm_l_nopage_tlbm(&l, p);
--- /dev/null
+From 2e6c7747730296a6d4fd700894286db1132598c4 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 16 Feb 2017 12:39:01 +0000
+Subject: MIPS: Force o32 fp64 support on 32bit MIPS64r6 kernels
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 2e6c7747730296a6d4fd700894286db1132598c4 upstream.
+
+When a 32-bit kernel is configured to support MIPS64r6 (CPU_MIPS64_R6),
+MIPS_O32_FP64_SUPPORT won't be selected as it should be because
+MIPS32_O32 is disabled (o32 is already the default ABI available on
+32-bit kernels).
+
+This results in userland FP breakage as CP0_Status.FR is read-only 1
+since r6 (when an FPU is present) so __enable_fpu() will fail to clear
+FR. This causes the FPU emulator to get used which will incorrectly
+emulate 32-bit FPU registers.
+
+Force o32 fp64 support in this case by also selecting
+MIPS_O32_FP64_SUPPORT from CPU_MIPS64_R6 if 32BIT.
+
+Fixes: 4e9d324d4288 ("MIPS: Require O32 FP64 support for MIPS64 with O32 compat")
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Reviewed-by: Paul Burton <paul.burton@imgtec.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15310/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1526,7 +1526,7 @@ config CPU_MIPS64_R6
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ select GENERIC_CSUM
+- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
++ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
+ select HAVE_KVM
+ help
+ Choose this option to build a kernel for release 6 or later of the
--- /dev/null
+From 6ef90877eee63a0d03e83183bb44b64229b624e6 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Wed, 15 Mar 2017 23:26:42 +0100
+Subject: MIPS: Lantiq: fix missing xbar kernel panic
+
+From: Hauke Mehrtens <hauke@hauke-m.de>
+
+commit 6ef90877eee63a0d03e83183bb44b64229b624e6 upstream.
+
+Commit 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+accidentally requested the resources from the pmu address region
+instead of the xbar registers region, but the check of the
+request_mem_region() return value was wrong. Commit 98ea51cb0c8c
+("MIPS: Lantiq: Fix another request_mem_region() return code check")
+fixed that check, which made the kernel panic.
+This patch now makes use of the correct memory region for the cross bar.
+
+Fixes: 08b3c894e565 ("MIPS: lantiq: Disable xbar fpi burst mode")
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+Cc: John Crispin <john@phrozen.org>
+Cc: james.hogan@imgtec.com
+Cc: arnd@arndb.de
+Cc: sergei.shtylyov@cogentembedded.com
+Cc: john@phrozen.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15751
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/lantiq/xway/sysctrl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
+
+ if (!np_xbar)
+ panic("Failed to load xbar nodes from devicetree");
+- if (of_address_to_resource(np_pmu, 0, &res_xbar))
++ if (of_address_to_resource(np_xbar, 0, &res_xbar))
+ panic("Failed to get xbar resources");
+ if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
+ res_xbar.name) < 0)
--- /dev/null
+From 7c5a3d813050ee235817b0220dd8c42359a9efd8 Mon Sep 17 00:00:00 2001
+From: John Crispin <john@phrozen.org>
+Date: Sat, 25 Feb 2017 11:54:23 +0100
+Subject: MIPS: ralink: Fix typos in rt3883 pinctrl
+
+From: John Crispin <john@phrozen.org>
+
+commit 7c5a3d813050ee235817b0220dd8c42359a9efd8 upstream.
+
+There are two copy & paste errors in the definition of the 5GHz LNA and
+second ethernet pinmux.
+
+Fixes: f576fb6a0700 ("MIPS: ralink: cleanup the soc specific pinmux data")
+Signed-off-by: John Crispin <john@phrozen.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15328/
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/ralink/rt3883.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_f
+ static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+ static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+ static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+ static struct rt2880_pmx_func pci_func[] = {
+ FUNC("pci-dev", 0, 40, 32),
+ FUNC("pci-host2", 1, 40, 32),
+@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[]
+ FUNC("pci-fnc", 3, 40, 32)
+ };
+ static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+
+ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
--- /dev/null
+From 921d701e6f31e1ffaca3560416af1aa04edb4c4f Mon Sep 17 00:00:00 2001
+From: Tobias Klauser <tklauser@distanz.ch>
+Date: Sun, 2 Apr 2017 20:08:04 -0700
+Subject: nios2: reserve boot memory for device tree
+
+From: Tobias Klauser <tklauser@distanz.ch>
+
+commit 921d701e6f31e1ffaca3560416af1aa04edb4c4f upstream.
+
+Make sure to reserve the boot memory for the flattened device tree.
+Otherwise it might get overwritten, e.g. when initial_boot_params is
+copied, leading to a corrupted FDT and a boot hang/crash:
+
+ bootconsole [early0] enabled
+ Early console on uart16650 initialized at 0xf8001600
+ OF: fdt: Error -11 processing FDT
+ Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+ ---[ end Kernel panic - not syncing: setup_cpuinfo: No CPU found in devicetree!
+
+Guenter Roeck says:
+
+> I think I found the problem. In unflatten_and_copy_device_tree(), with added
+> debug information:
+>
+> OF: fdt: initial_boot_params=c861e400, dt=c861f000 size=28874 (0x70ca)
+>
+> ... and then initial_boot_params is copied to dt, which results in corrupted
+> fdt since the memory overlaps. Looks like the initial_boot_params memory
+> is not reserved and (re-)allocated by early_init_dt_alloc_memory_arch().
+
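+As a rough illustration (simplified, based on the generic FDT helpers in
+drivers/of/fdt.c at the time of this patch, not on nios2-specific code),
+the two hunks below fit together like this: the blob has to be reserved
+through the arch hook before anything copies it or allocates over it.
+
+  /* Sketch of the intended call flow in setup_arch():                   */
+  early_init_fdt_reserve_self();
+      /* -> early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
+       *        fdt_totalsize(initial_boot_params), 0);
+       *    which the new nios2 hook maps onto reserve_bootmem()         */
+  early_init_fdt_scan_reserved_mem();   /* honour /reserved-memory nodes */
+  unflatten_and_copy_device_tree();     /* the copy can no longer overlap */
+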
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Reference: http://lkml.kernel.org/r/20170226210338.GA19476@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
+Acked-by: Ley Foon Tan <ley.foon.tan@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/nios2/kernel/prom.c | 7 +++++++
+ arch/nios2/kernel/setup.c | 3 +++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory
+ return alloc_bootmem_align(size, align);
+ }
+
++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
++ bool nomap)
++{
++ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
++ return 0;
++}
++
+ void __init early_init_devtree(void *params)
+ {
+ __be32 *dtb = (u32 *)__dtb_start;
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -200,6 +200,9 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
++ early_init_fdt_reserve_self();
++ early_init_fdt_scan_reserved_mem();
++
+ unflatten_and_copy_device_tree();
+
+ setup_cpuinfo();
--- /dev/null
+From 8f5f525d5b83f7d76a6baf9c4e94d4bf312ea7f6 Mon Sep 17 00:00:00 2001
+From: Oliver O'Halloran <oohall@gmail.com>
+Date: Mon, 3 Apr 2017 13:25:12 +1000
+Subject: powerpc/64: Fix flush_(d|i)cache_range() called from modules
+
+From: Oliver O'Halloran <oohall@gmail.com>
+
+commit 8f5f525d5b83f7d76a6baf9c4e94d4bf312ea7f6 upstream.
+
+When the kernel is compiled to use the 64-bit ABIv2, the _GLOBAL() macro
+does not include a global entry point. A function's global entry point
+is used when the function is called from a different TOC context, and in
+the kernel this typically means a call from a module into the vmlinux
+(or vice-versa).
+
+There are a few exported asm functions declared with _GLOBAL() and
+calling them from a module will likely crash the kernel since any TOC
+relative load will yield garbage.
+
+flush_icache_range() and flush_dcache_range() are both exported to
+modules, and use the TOC, so must use _GLOBAL_TOC().
+
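+For illustration, a minimal (hypothetical) module that would hit the bug
+before this fix: the module-to-vmlinux call must go through the global
+entry point, which sets up r2 (the TOC pointer) before any TOC-relative
+load such as the one fetching PPC64_CACHES.
+
+  #include <linux/module.h>
+  #include <linux/init.h>
+  #include <asm/cacheflush.h>
+
+  static int __init demo_init(void)
+  {
+          static char buf[64] = "patched text";
+
+          /* Exported asm function that uses the TOC: without
+           * _GLOBAL_TOC() this cross-TOC call skips the r2 setup and
+           * the TOC-relative load of the cache geometry reads garbage. */
+          flush_icache_range((unsigned long)buf,
+                             (unsigned long)buf + sizeof(buf));
+          return 0;
+  }
+  module_init(demo_init);
+
+  MODULE_LICENSE("GPL");
+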
+Fixes: 721aeaa9fdf3 ("powerpc: Build little endian ppc64 kernel with ABIv2")
+Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/misc_64.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -67,7 +67,7 @@ PPC64_CACHES:
+ * flush all bytes from start through stop-1 inclusive
+ */
+
+-_GLOBAL(flush_icache_range)
++_GLOBAL_TOC(flush_icache_range)
+ BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
+ blr
+@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
+ *
+ * flush all bytes from start to stop-1 inclusive
+ */
+-_GLOBAL(flush_dcache_range)
++_GLOBAL_TOC(flush_dcache_range)
+
+ /*
+ * Flush the data cache to memory
--- /dev/null
+From 4749228f022893faf54a3dbc70796f78b7d4f342 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 6 Apr 2017 23:34:38 +1000
+Subject: powerpc/crypto/crc32c-vpmsum: Fix missing preempt_disable()
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 4749228f022893faf54a3dbc70796f78b7d4f342 upstream.
+
+In crc32c_vpmsum() we call enable_kernel_altivec() without first
+disabling preemption, which is not allowed:
+
+ WARNING: CPU: 9 PID: 2949 at ../arch/powerpc/kernel/process.c:277 enable_kernel_altivec+0x100/0x120
+ Modules linked in: dm_thin_pool dm_persistent_data dm_bio_prison dm_bufio libcrc32c vmx_crypto ...
+ CPU: 9 PID: 2949 Comm: docker Not tainted 4.11.0-rc5-compiler_gcc-6.3.1-00033-g308ac7563944 #381
+ ...
+ NIP [c00000000001e320] enable_kernel_altivec+0x100/0x120
+ LR [d000000003df0910] crc32c_vpmsum+0x108/0x150 [crc32c_vpmsum]
+ Call Trace:
+ 0xc138fd09 (unreliable)
+ crc32c_vpmsum+0x108/0x150 [crc32c_vpmsum]
+ crc32c_vpmsum_update+0x3c/0x60 [crc32c_vpmsum]
+ crypto_shash_update+0x88/0x1c0
+ crc32c+0x64/0x90 [libcrc32c]
+ dm_bm_checksum+0x48/0x80 [dm_persistent_data]
+ sb_check+0x84/0x120 [dm_thin_pool]
+ dm_bm_validate_buffer.isra.0+0xc0/0x1b0 [dm_persistent_data]
+ dm_bm_read_lock+0x80/0xf0 [dm_persistent_data]
+ __create_persistent_data_objects+0x16c/0x810 [dm_thin_pool]
+ dm_pool_metadata_open+0xb0/0x1a0 [dm_thin_pool]
+ pool_ctr+0x4cc/0xb60 [dm_thin_pool]
+ dm_table_add_target+0x16c/0x3c0
+ table_load+0x184/0x400
+ ctl_ioctl+0x2f0/0x560
+ dm_ctl_ioctl+0x38/0x50
+ do_vfs_ioctl+0xd8/0x920
+ SyS_ioctl+0x68/0xc0
+ system_call+0x38/0xfc
+
+It used to be sufficient just to call pagefault_disable(), because that
+also disabled preemption. But the two were decoupled in commit 8222dbe21e79
+("sched/preempt, mm/fault: Decouple preemption from the page fault
+logic") in mid 2015.
+
+So add the missing preempt_disable/enable(). We should also call the
+matching disable_kernel_altivec(): although it does nothing by default,
+there is a debug switch to make it active, and all enables should be
+paired with disables.
+
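+The resulting pattern, sketched below, is the usual one for using VMX
+from kernel code (the helper name vmx_memop is made up; the enable,
+disable and preemption calls are the real APIs used by this driver):
+
+  #include <linux/preempt.h>
+  #include <linux/uaccess.h>       /* pagefault_disable/enable()   */
+  #include <asm/switch_to.h>       /* enable_kernel_altivec() etc. */
+
+  static void vmx_memop(void *dst, const void *src, size_t len)
+  {
+          preempt_disable();       /* enable_kernel_altivec() needs this */
+          pagefault_disable();
+          enable_kernel_altivec(); /* hand the VMX unit to this context  */
+
+          /* ... Altivec-accelerated work on dst/src ... */
+
+          disable_kernel_altivec();
+          pagefault_enable();
+          preempt_enable();
+  }
+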
+Fixes: 6dd7a82cc54e ("crypto: powerpc - Add POWER8 optimised crc32c")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/crypto/crc32c-vpmsum_glue.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
++++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsign
+ }
+
+ if (len & ~VMX_ALIGN_MASK) {
++ preempt_disable();
+ pagefault_disable();
+ enable_kernel_altivec();
+ crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
++ disable_kernel_altivec();
+ pagefault_enable();
++ preempt_enable();
+ }
+
+ tail = len & VMX_ALIGN_MASK;
--- /dev/null
+From 7ed23e1bae8bf7e37fd555066550a00b95a3a98b Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Mon, 20 Mar 2017 17:49:03 +1100
+Subject: powerpc: Disable HFSCR[TM] if TM is not supported
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 7ed23e1bae8bf7e37fd555066550a00b95a3a98b upstream.
+
+On Power8 & Power9 the early CPU initialisation in __init_HFSCR()
+turns on HFSCR[TM] (Hypervisor Facility Status and Control Register
+[Transactional Memory]), but that doesn't take into account that TM
+might be disabled by CPU features, or disabled by the kernel being built
+with CONFIG_PPC_TRANSACTIONAL_MEM=n.
+
+So later in boot, once we have set up the CPU features, clear HFSCR[TM]
+if the TM CPU feature has been disabled. We use CPU_FTR_TM_COMP to account
+for the CONFIG_PPC_TRANSACTIONAL_MEM=n case.
+
+Without this a KVM guest might try to use TM, even if told not to, and
+cause an oops in the host kernel. Typically the oops is seen in
+__kvmppc_vcore_entry() and may or may not be fatal to the host, but is
+always bad news.
+
+In practice all shipping CPU revisions do support TM, and all host
+kernels we are aware of build with TM support enabled, so no one should
+actually be able to hit this in the wild.
+
+Fixes: 2a3563b023e5 ("powerpc: Setup in HFSCR for POWER8")
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Tested-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
+[mpe: Rewrite change log with input from Sam, add Fixes/stable]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/setup_64.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -245,6 +245,15 @@ static void cpu_ready_for_interrupts(voi
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
+
++ /*
++ * Fixup HFSCR:TM based on CPU features. The bit is set by our
++ * early asm init because at that point we haven't updated our
++ * CPU features from firmware and device-tree. Here we have,
++ * so let's do it.
++ */
++ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
++ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
++
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+ }
--- /dev/null
+From 48fe9e9488743eec9b7c1addd3c93f12f2123d54 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 4 Apr 2017 14:56:05 +1000
+Subject: powerpc: Don't try to fix up misaligned load-with-reservation instructions
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 48fe9e9488743eec9b7c1addd3c93f12f2123d54 upstream.
+
+In the past, there was only one load-with-reservation instruction,
+lwarx. If a program attempted a lwarx on a misaligned address, it would
+take an alignment interrupt and the kernel handler would emulate it as
+though it were lwzx. That was not really correct, but benign, since it
+loads the right amount of data, and the lwarx should be paired with a
+stwcx. to the same address, which would also cause an alignment
+interrupt and result in a SIGBUS being delivered to the process.
+
+We now have 5 different sizes of load-with-reservation instruction. Of
+those, lharx and ldarx cause an immediate SIGBUS by luck since their
+entries in aligninfo[] overlap instructions which were not fixed up, but
+lqarx overlaps with lhz and will be emulated as such. lbarx can never
+generate an alignment interrupt since it only operates on 1 byte.
+
+To straighten this out and fix the lqarx case, this adds code to detect
+the l[hwdq]arx instructions and return without fixing them up, resulting
+in a SIGBUS being delivered to the process.
+
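+Both the old and the new code key off the X-form extended opcode, which
+lives in bits 21-30 of the instruction word; ((instruction >> 1) & 0x3ff)
+extracts it, and get_xop() in the new code presumably wraps the same bit
+manipulation. A hypothetical standalone helper with one decoded example:
+
+  /* Not the kernel's definition, just the same bit manipulation. */
+  static inline unsigned int xform_xop(unsigned int insn)
+  {
+          return (insn >> 1) & 0x3ff;
+  }
+
+  /* e.g. 0x7c2028a8 encodes "ldarx r1,0,r5": xform_xop() returns 84,
+   * so fix_alignment() now returns 0 and the task gets a SIGBUS
+   * instead of a bogus emulation. */
+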
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c | 27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
+ nb = aligninfo[instr].len;
+ flags = aligninfo[instr].flags;
+
+- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+- nb = 8;
+- flags = LD+SW;
+- } else if (IS_XFORM(instruction) &&
+- ((instruction >> 1) & 0x3ff) == 660) {
+- nb = 8;
+- flags = ST+SW;
++ /*
++ * Handle some cases which give overlaps in the DSISR values.
++ */
++ if (IS_XFORM(instruction)) {
++ switch (get_xop(instruction)) {
++ case 532: /* ldbrx */
++ nb = 8;
++ flags = LD+SW;
++ break;
++ case 660: /* stdbrx */
++ nb = 8;
++ flags = ST+SW;
++ break;
++ case 20: /* lwarx */
++ case 84: /* ldarx */
++ case 116: /* lharx */
++ case 276: /* lqarx */
++ return 0; /* not emulated ever */
++ }
+ }
+
+ /* Byteswap little endian loads and stores */
--- /dev/null
+From 88b1bf7268f56887ca88eb09c6fb0f4fc970121a Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Date: Wed, 29 Mar 2017 19:19:42 +0200
+Subject: powerpc/mm: Add missing global TLB invalidate if cxl is active
+
+From: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+
+commit 88b1bf7268f56887ca88eb09c6fb0f4fc970121a upstream.
+
+Commit 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl") converted local
+TLB invalidates to global if the cxl driver is active. This is necessary
+because the CAPP snoops invalidations to forward them to the PSL on the
+cxl adapter. However, one path was forgotten: native_flush_hash_range()
+still does local TLB invalidates, as was found out the hard way recently.
+
+This patch fixes it by following the same logic as previously: if the
+cxl driver is active, the local TLB invalidates are 'upgraded' to
+global.
+
+Fixes: 4c6d9acce1f4 ("powerpc/mm: Add hooks for cxl")
+Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hash_native_64.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsi
+ unsigned long psize = batch->psize;
+ int ssize = batch->ssize;
+ int i;
++ unsigned int use_local;
++
++ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
++ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
+
+ local_irq_save(flags);
+
+@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsi
+ } pte_iterate_hashed_end();
+ }
+
+- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+- mmu_psize_defs[psize].tlbiel && local) {
++ if (use_local) {
+ asm volatile("ptesync":::"memory");
+ for (i = 0; i < number; i++) {
+ vpn = batch->vpn[i];
--- /dev/null
+From d82c0d12c92705ef468683c9b7a8298dd61ed191 Mon Sep 17 00:00:00 2001
+From: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Date: Mon, 13 Mar 2017 12:14:58 -0300
+Subject: s390/decompressor: fix initrd corruption caused by bss clear
+
+From: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+
+commit d82c0d12c92705ef468683c9b7a8298dd61ed191 upstream.
+
+Reorder the operations in decompress_kernel() to ensure initrd is moved
+to a safe location before the bss section is zeroed.
+
+During decompression bss can overlap with the initrd and this can
+corrupt the initrd contents depending on the size of the compressed
+kernel (which affects where the initrd is placed by the bootloader) and
+the size of the bss section of the decompressor.
+
+Also use the correct initrd size when checking for overlaps with
+parmblock.
+
+Fixes: 06c0dd72aea3 ([S390] fix boot failures with compressed kernels)
+Reviewed-by: Joy Latten <joy.latten@canonical.com>
+Reviewed-by: Vineetha HariPai <vineetha.hari.pai@canonical.com>
+Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/boot/compressed/misc.c | 35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *st
+
+ unsigned long decompress_kernel(void)
+ {
+- unsigned long output_addr;
+- unsigned char *output;
++ void *output, *kernel_end;
+
+- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+- memset(&_bss, 0, &_ebss - &_bss);
+- free_mem_ptr = (unsigned long)&_end;
+- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+- output = (unsigned char *) output_addr;
++ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++ kernel_end = output + SZ__bss_start;
++ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * Move the initrd right behind the end of the decompressed
+- * kernel image.
++ * kernel image. This also prevents initrd corruption caused by
++ * bss clearing since kernel_end will always be located behind the
++ * current bss section..
+ */
+- if (INITRD_START && INITRD_SIZE &&
+- INITRD_START < (unsigned long) output + SZ__bss_start) {
+- check_ipl_parmblock(output + SZ__bss_start,
+- INITRD_START + INITRD_SIZE);
+- memmove(output + SZ__bss_start,
+- (void *) INITRD_START, INITRD_SIZE);
+- INITRD_START = (unsigned long) output + SZ__bss_start;
++ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++ check_ipl_parmblock(kernel_end, INITRD_SIZE);
++ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++ INITRD_START = (unsigned long) kernel_end;
+ }
+ #endif
+
++ /*
++ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++ * initialized afterwards since they reside in bss.
++ */
++ memset(&_bss, 0, &_ebss - &_bss);
++ free_mem_ptr = (unsigned long) &_end;
++ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+ puts("Uncompressing Linux... ");
+ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ puts("Ok, booting the kernel.\n");
--- /dev/null
+From d09c5373e8e4eaaa09233552cbf75dc4c4f21203 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 27 Mar 2017 09:48:04 +0200
+Subject: s390/uaccess: get_user() should zero on failure (again)
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit d09c5373e8e4eaaa09233552cbf75dc4c4f21203 upstream.
+
+Commit fd2d2b191fe7 ("s390: get_user() should zero on failure")
+intended to fix s390's get_user() implementation which did not zero
+the target operand if the read from user space faulted. Unfortunately
+the patch has no effect: the corresponding inline assembly specifies
+that the operand is only written to ("=") and the previous value is
+discarded.
+
+Therefore the compiler is free to and actually does omit the zero
+initialization.
+
+To fix this, simply change the constraint modifier to "+", so the
+compiler cannot omit the initialization anymore.
+
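+A generic illustration of the difference (hypothetical code, not the
+s390 macro; the asm body is empty because only the constraint matters):
+
+  static inline int demo_fetch(const int *uaddr)
+  {
+          int val = 0;    /* value the caller must see on a fault */
+
+          /* With "=r" (val) the compiler may assume the asm always
+           * writes val, so "val = 0" is a dead store and can be
+           * dropped. With "+r" (val) the old value is also an input,
+           * so the zero-initialization has to survive even when the
+           * asm bails out early, as the faulting access does.       */
+          asm volatile("" : "+r" (val) : "r" (uaddr) : "memory");
+          return val;
+  }
+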
+Fixes: c9ca78415ac1 ("s390/uaccess: provide inline variants of get_user/put_user")
+Fixes: fd2d2b191fe7 ("s390: get_user() should zero on failure")
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/uaccess.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -144,7 +144,7 @@ unsigned long __must_check __copy_to_use
+ " jg 2b\n" \
+ ".popsection\n" \
+ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
+- : "=d" (__rc), "=Q" (*(to)) \
++ : "=d" (__rc), "+Q" (*(to)) \
+ : "d" (size), "Q" (*(from)), \
+ "d" (__reg0), "K" (-EFAULT) \
+ : "cc"); \
ring-buffer-fix-return-value-check-in-test_ringbuffer.patch
mac80211-unconditionally-start-new-netdev-queues-with-itxq-support.patch
brcmfmac-use-local-iftype-avoiding-use-after-free-of-virtual-interface.patch
+metag-usercopy-drop-unused-macros.patch
+metag-usercopy-fix-alignment-error-checking.patch
+metag-usercopy-add-early-abort-to-copy_to_user.patch
+metag-usercopy-zero-rest-of-buffer-from-copy_from_user.patch
+metag-usercopy-set-flags-before-addz.patch
+metag-usercopy-fix-src-fixup-in-from-user-rapf-loops.patch
+metag-usercopy-add-missing-fixups.patch
+drm-msm-adreno-fix-build-error-without-debugfs.patch
+powerpc-disable-hfscr-if-tm-is-not-supported.patch
+powerpc-mm-add-missing-global-tlb-invalidate-if-cxl-is-active.patch
+powerpc-64-fix-flush_-d-i-cache_range-called-from-modules.patch
+powerpc-don-t-try-to-fix-up-misaligned-load-with-reservation-instructions.patch
+powerpc-crypto-crc32c-vpmsum-fix-missing-preempt_disable.patch
+dm-raid-fix-null-pointer-dereference-for-raid1-without-bitmap.patch
+x86-mce-don-t-print-mces-when-mcelog-is-active.patch
+nios2-reserve-boot-memory-for-device-tree.patch
+xtensa-make-__pa-work-with-uncached-kseg-addresses.patch
+s390-decompressor-fix-initrd-corruption-caused-by-bss-clear.patch
+s390-uaccess-get_user-should-zero-on-failure-again.patch
+mips-force-o32-fp64-support-on-32bit-mips64r6-kernels.patch
+mips-ralink-fix-typos-in-rt3883-pinctrl.patch
+mips-end-spinlocks-with-.insn.patch
+mips-lantiq-fix-missing-xbar-kernel-panic.patch
+mips-check-tlb-before-handle_ri_rdhwr-for-loongson-3.patch
+mips-add-mips_cpu_ftlb-for-loongson-3a-r2.patch
+mips-flush-wrong-invalid-ftlb-entry-for-huge-page.patch
+mips-c-r4k-fix-loongson-3-s-vcache-scache-waysize-calculation.patch
+drm-i915-gvt-fix-gvt-scheduler-interval-time.patch
+drm-i915-kvmgt-fix-suspicious-rcu-dereference-usage.patch
--- /dev/null
+From cc66afea58f858ff6da7f79b8a595a67bbb4f9a9 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Mon, 27 Mar 2017 11:32:59 +0200
+Subject: x86/mce: Don't print MCEs when mcelog is active
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit cc66afea58f858ff6da7f79b8a595a67bbb4f9a9 upstream.
+
+Since:
+
+ cd9c57cad3fe ("x86/MCE: Dump MCE to dmesg if no consumers")
+
+all MCEs are printed even when mcelog is running. Fix the regression to
+not print to dmesg when mcelog is running, as it is a consumer too.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+[ Massage commit message. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Fixes: cd9c57cad3fe ("x86/MCE: Dump MCE to dmesg if no consumers")
+Link: http://lkml.kernel.org/r/20170327093304.10683-2-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -54,6 +54,8 @@
+
+ static DEFINE_MUTEX(mce_chrdev_read_mutex);
+
++static int mce_chrdev_open_count; /* #times opened */
++
+ #define mce_log_get_idx_check(p) \
+ ({ \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+@@ -601,6 +603,10 @@ static int mce_default_notifier(struct n
+ if (atomic_read(&num_notifiers) > 2)
+ return NOTIFY_DONE;
+
++ /* Don't print when mcelog is running */
++ if (mce_chrdev_open_count > 0)
++ return NOTIFY_DONE;
++
+ __print_mce(m);
+
+ return NOTIFY_DONE;
+@@ -1871,7 +1877,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86
+ */
+
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu; /* already open exclusive? */
+
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
--- /dev/null
+From 2b83878dd74a7c73bedcb6600663c1c46836e8af Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 29 Mar 2017 15:44:47 -0700
+Subject: xtensa: make __pa work with uncached KSEG addresses
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 2b83878dd74a7c73bedcb6600663c1c46836e8af upstream.
+
+When __pa is applied to a virtual address in the uncached KSEG region,
+the result is incorrect. Fix it by checking whether the original address
+is in the uncached KSEG and adjusting the result. This looks better than
+masking off bits because pfn_valid would work correctly with the new
+__pa results, and it can be made to work in the noMMU case once we have
+a definition for the uncached memory view.
+
+This is required for the dma_common_mmap and DMA debug code to work
+correctly: they both indirectly use __pa with coherent DMA addresses.
+In the case of DMA debug, the visible effect is false reports that an
+address mapped for DMA is being accessed by the CPU.
+
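+A worked example, assuming the common MMUv2 layout (cached KSEG at
+PAGE_OFFSET = 0xd0000000, its uncached alias at 0xd8000000,
+XCHAL_KSEG_SIZE = 0x08000000, PHYS_OFFSET = 0):
+
+  ___pa(0xd0001000): off = 0x00001000 <  KSEG_SIZE  ->  pa = 0x00001000
+  ___pa(0xd8001000): off = 0x08001000 >= KSEG_SIZE  ->  pa = 0x00001000
+
+Previously the second case yielded 0x08001000, a physical address that
+(on a typical board with RAM starting at 0) does not exist, which
+confused pfn_valid() and the DMA debug checks.
+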
+Tested-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/include/asm/page.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/xtensa/include/asm/page.h
++++ b/arch/xtensa/include/asm/page.h
+@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to,
+
+ #define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
++#ifdef CONFIG_MMU
++static inline unsigned long ___pa(unsigned long va)
++{
++ unsigned long off = va - PAGE_OFFSET;
++
++ if (off >= XCHAL_KSEG_SIZE)
++ off -= XCHAL_KSEG_SIZE;
++
++ return off + PHYS_OFFSET;
++}
++#define __pa(x) ___pa((unsigned long)(x))
++#else
+ #define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
++#endif
+ #define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
+ #define pfn_valid(pfn) \