--- /dev/null
+From 18ea43113f5b74a97dd4be9bddbac10d68b1a6ce Mon Sep 17 00:00:00 2001
+From: Germano Percossi <germano.percossi@citrix.com>
+Date: Fri, 7 Apr 2017 12:29:36 +0100
+Subject: CIFS: reconnect thread reschedule itself
+
+From: Germano Percossi <germano.percossi@citrix.com>
+
+commit 18ea43113f5b74a97dd4be9bddbac10d68b1a6ce upstream.
+
+In case of error, smb2_reconnect_server reschedules itself
+with a delay, to avoid being too aggressive.
+
+Signed-off-by: Germano Percossi <germano.percossi@citrix.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1987,6 +1987,9 @@ void smb2_reconnect_server(struct work_s
+ struct cifs_tcon *tcon, *tcon2;
+ struct list_head tmp_list;
+ int tcon_exist = false;
++ int rc;
++ int resched = false;
++
+
+ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+ mutex_lock(&server->reconnect_mutex);
+@@ -2014,13 +2017,18 @@ void smb2_reconnect_server(struct work_s
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+- if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
++ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
++ if (!rc)
+ cifs_reopen_persistent_handles(tcon);
++ else
++ resched = true;
+ list_del_init(&tcon->rlist);
+ cifs_put_tcon(tcon);
+ }
+
+ cifs_dbg(FYI, "Reconnecting tcons finished\n");
++ if (resched)
++ queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
+ mutex_unlock(&server->reconnect_mutex);
+
+ /* now we can safely release srv struct */
--- /dev/null
+From 1fa839b4986d648b907d117275869a0e46c324b9 Mon Sep 17 00:00:00 2001
+From: Germano Percossi <germano.percossi@citrix.com>
+Date: Fri, 7 Apr 2017 12:29:38 +0100
+Subject: CIFS: store results of cifs_reopen_file to avoid infinite wait
+
+From: Germano Percossi <germano.percossi@citrix.com>
+
+commit 1fa839b4986d648b907d117275869a0e46c324b9 upstream.
+
+This fixes Continuous Availability when errors during
+file reopen are encountered.
+
+cifs_user_readv and cifs_user_writev would wait forever if the
+results of cifs_reopen_file are not stored for later inspection.
+
+In fact, the results are checked and, in case of errors, the chain
+of function calls that would schedule reads and writes in a separate
+thread is skipped.
+These threads wake up the corresponding waiters once the reads
+and writes are done.
+
+However, because the return value is not stored, when rc is checked
+for errors a previous value (always zero) is inspected instead.
+This leads to the pending reads/writes being added to the list,
+making cifs_user_readv and cifs_user_writev wait forever.
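+
+As a condensed illustration of the old write path (simplified, not
+the exact code):
+
+	rc = 0;						/* from earlier setup   */
+	if (!wdata->cfile->invalidHandle ||
+	    !cifs_reopen_file(wdata->cfile, false))	/* return value dropped */
+		rc = server->ops->async_writev(wdata, ...);
+	if (rc) {					/* still 0 if the reopen failed, so */
+		...					/* the error path that would wake   */
+	}						/* the waiter is skipped            */
+
+Storing the reopen result in rc, as the hunks below do, makes the
+error check see the real failure.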
+
+Signed-off-by: Germano Percossi <germano.percossi@citrix.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size
+ wdata->credits = credits;
+
+ if (!wdata->cfile->invalidHandle ||
+- !cifs_reopen_file(wdata->cfile, false))
++ !(rc = cifs_reopen_file(wdata->cfile, false)))
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
+ if (rc) {
+@@ -3002,7 +3002,7 @@ cifs_send_async_read(loff_t offset, size
+ rdata->credits = credits;
+
+ if (!rdata->cfile->invalidHandle ||
+- !cifs_reopen_file(rdata->cfile, true))
++ !(rc = cifs_reopen_file(rdata->cfile, true)))
+ rc = server->ops->async_readv(rdata);
+ error:
+ if (rc) {
+@@ -3577,7 +3577,7 @@ static int cifs_readpages(struct file *f
+ }
+
+ if (!rdata->cfile->invalidHandle ||
+- !cifs_reopen_file(rdata->cfile, true))
++ !(rc = cifs_reopen_file(rdata->cfile, true)))
+ rc = server->ops->async_readv(rdata);
+ if (rc) {
+ add_credits_and_wake_if(server, rdata->credits, 0);
--- /dev/null
+From 45abdf35cf82e4270328c7237e7812de960ac560 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Wed, 12 Apr 2017 00:31:16 +0000
+Subject: drm/etnaviv: fix missing unlock on error in etnaviv_gpu_submit()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 45abdf35cf82e4270328c7237e7812de960ac560 upstream.
+
+Add the missing unlock before return from function etnaviv_gpu_submit()
+in the error handling case.
+
+lst: fixed label name.
+
+Fixes: f3cd1b064f11 ("drm/etnaviv: (re-)protect fence allocation with GPU mutex")
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -1305,7 +1305,7 @@ int etnaviv_gpu_submit(struct etnaviv_gp
+ if (!fence) {
+ event_free(gpu, event);
+ ret = -ENOMEM;
+- goto out_pm_put;
++ goto out_unlock;
+ }
+
+ gpu->event[event].fence = fence;
+@@ -1345,6 +1345,7 @@ int etnaviv_gpu_submit(struct etnaviv_gp
+ hangcheck_timer_reset(gpu);
+ ret = 0;
+
++out_unlock:
+ mutex_unlock(&gpu->lock);
+
+ out_pm_put:
--- /dev/null
+From f94773b9f5ecd1df7c88c2e921924dd41d2020cc Mon Sep 17 00:00:00 2001
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+Date: Sat, 18 Mar 2017 16:23:10 -0400
+Subject: drm/nouveau/mmu/nv4a: use nv04 mmu rather than the nv44 one
+
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+
+commit f94773b9f5ecd1df7c88c2e921924dd41d2020cc upstream.
+
+The NV4A (aka NV44A) is an oddity in the family. It only comes in AGP
+and PCI varieties, rather than a core PCIE chip with a bridge for
+AGP/PCI as necessary. As a result, it appears that the MMU is also
+non-functional. For AGP cards, the vast majority of the NV4A lineup,
+this worked out since we force AGP cards to use the nv04 mmu. However
+for PCI variants, this did not work.
+
+Switching to the NV04 MMU makes it work like a charm. Thanks to mwk for
+the suggestion. This should be a no-op for NV4A AGP boards, as they were
+using it already.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=70388
+Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -714,7 +714,7 @@ nv4a_chipset = {
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+- .mmu = nv44_mmu_new,
++ .mmu = nv04_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
--- /dev/null
+From 83bce9c2baa51e439480a713119a73d3c8b61083 Mon Sep 17 00:00:00 2001
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+Date: Sat, 18 Mar 2017 21:53:05 -0400
+Subject: drm/nouveau/mpeg: mthd returns true on success now
+
+From: Ilia Mirkin <imirkin@alum.mit.edu>
+
+commit 83bce9c2baa51e439480a713119a73d3c8b61083 upstream.
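+
+Since the commit referenced below, nv31_mpeg_mthd() and
+nv44_mpeg_mthd() return true on success, so the interrupt handlers
+must clear the 0x01000000 bit in "show" when the method call
+succeeds, not when it fails as the old inverted test assumed.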
+
+Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
+Fixes: 590801c1a3 ("drm/nouveau/mpeg: remove dependence on namedb/engctx lookup")
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2 +-
+ drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engin
+ }
+
+ if (type == 0x00000010) {
+- if (!nv31_mpeg_mthd(mpeg, mthd, data))
++ if (nv31_mpeg_mthd(mpeg, mthd, data))
+ show &= ~0x01000000;
+ }
+ }
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engin
+ }
+
+ if (type == 0x00000010) {
+- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
++ if (nv44_mpeg_mthd(subdev->device, mthd, data))
+ show &= ~0x01000000;
+ }
+ }
--- /dev/null
+From 5376366886251e2f8f248704adb620a4bc4c0937 Mon Sep 17 00:00:00 2001
+From: Cameron Gutman <aicommander@gmail.com>
+Date: Mon, 10 Apr 2017 20:44:25 -0700
+Subject: Input: xpad - add support for Razer Wildcat gamepad
+
+From: Cameron Gutman <aicommander@gmail.com>
+
+commit 5376366886251e2f8f248704adb620a4bc4c0937 upstream.
+
+Signed-off-by: Cameron Gutman <aicommander@gmail.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/joystick/xpad.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -201,6 +201,7 @@ static const struct xpad_device {
+ { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+@@ -329,6 +330,7 @@ static struct usb_device_id xpad_table[]
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
++ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
+ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ { }
--- /dev/null
+From 1ec1688c5360e14dde4094d6acbf7516bf6db37e Mon Sep 17 00:00:00 2001
+From: Martin Brandenburg <martin@omnibond.com>
+Date: Fri, 14 Apr 2017 14:22:41 -0400
+Subject: orangefs: free superblock when mount fails
+
+From: Martin Brandenburg <martin@omnibond.com>
+
+commit 1ec1688c5360e14dde4094d6acbf7516bf6db37e upstream.
+
+Otherwise lockdep says:
+
+[ 1337.483798] ================================================
+[ 1337.483999] [ BUG: lock held when returning to user space! ]
+[ 1337.484252] 4.11.0-rc6 #19 Not tainted
+[ 1337.484423] ------------------------------------------------
+[ 1337.484626] mount/14766 is leaving the kernel with locks still held!
+[ 1337.484841] 1 lock held by mount/14766:
+[ 1337.485017] #0: (&type->s_umount_key#33/1){+.+.+.}, at: [<ffffffff8124171f>] sget_userns+0x2af/0x520
+
+Caught by xfstests generic/413, which tried to mount with the
+unsupported mount option dax. Then xfstests generic/422 ran sync,
+which deadlocked.
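+
+(The failing path returned through free_op without ever calling
+deactivate_locked_super(), so the s_umount lock taken in
+sget_userns() was still held when orangefs_mount() returned. The new
+free_sb_and_op label drops the superblock properly, and the no_list
+flag tells orangefs_kill_sb() that this sb was never added to the
+orangefs superblock list.)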
+
+Signed-off-by: Martin Brandenburg <martin@omnibond.com>
+Acked-by: Mike Marshall <hubcap@omnibond.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/orangefs/devorangefs-req.c | 9 +++++++--
+ fs/orangefs/orangefs-kernel.h | 1 +
+ fs/orangefs/super.c | 23 ++++++++++++++++-------
+ 3 files changed, 24 insertions(+), 9 deletions(-)
+
+--- a/fs/orangefs/devorangefs-req.c
++++ b/fs/orangefs/devorangefs-req.c
+@@ -208,14 +208,19 @@ restart:
+ continue;
+ /*
+ * Skip ops whose filesystem we don't know about unless
+- * it is being mounted.
++ * it is being mounted or unmounted. It is possible for
++ * a filesystem we don't know about to be unmounted if
++ * it fails to mount in the kernel after userspace has
++ * been sent the mount request.
+ */
+ /* XXX: is there a better way to detect this? */
+ } else if (ret == -1 &&
+ !(op->upcall.type ==
+ ORANGEFS_VFS_OP_FS_MOUNT ||
+ op->upcall.type ==
+- ORANGEFS_VFS_OP_GETATTR)) {
++ ORANGEFS_VFS_OP_GETATTR ||
++ op->upcall.type ==
++ ORANGEFS_VFS_OP_FS_UMOUNT)) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "orangefs: skipping op tag %llu %s\n",
+ llu(op->tag), get_opname_string(op));
+--- a/fs/orangefs/orangefs-kernel.h
++++ b/fs/orangefs/orangefs-kernel.h
+@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
+ char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
+ struct super_block *sb;
+ int mount_pending;
++ int no_list;
+ struct list_head list;
+ };
+
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct fil
+
+ if (ret) {
+ d = ERR_PTR(ret);
+- goto free_op;
++ goto free_sb_and_op;
+ }
+
+ /*
+@@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct fil
+ spin_unlock(&orangefs_superblocks_lock);
+ op_release(new_op);
+
++ /* Must be removed from the list now. */
++ ORANGEFS_SB(sb)->no_list = 0;
++
+ if (orangefs_userspace_version >= 20906) {
+ new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
+ if (!new_op)
+@@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct fil
+
+ return dget(sb->s_root);
+
++free_sb_and_op:
++ /* Will call orangefs_kill_sb with sb not in list. */
++ ORANGEFS_SB(sb)->no_list = 1;
++ deactivate_locked_super(sb);
+ free_op:
+ gossip_err("orangefs_mount: mount request failed with %d\n", ret);
+ if (ret == -EINVAL) {
+@@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block
+ */
+ orangefs_unmount_sb(sb);
+
+- /* remove the sb from our list of orangefs specific sb's */
+-
+- spin_lock(&orangefs_superblocks_lock);
+- __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
+- ORANGEFS_SB(sb)->list.prev = NULL;
+- spin_unlock(&orangefs_superblocks_lock);
++ if (!ORANGEFS_SB(sb)->no_list) {
++ /* remove the sb from our list of orangefs specific sb's */
++ spin_lock(&orangefs_superblocks_lock);
++ /* not list_del_init */
++ __list_del_entry(&ORANGEFS_SB(sb)->list);
++ ORANGEFS_SB(sb)->list.prev = NULL;
++ spin_unlock(&orangefs_superblocks_lock);
++ }
+
+ /*
+ * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
--- /dev/null
+From f2200ac311302fcdca6556fd0c5127eab6c65a3e Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 11 Apr 2017 10:10:28 +0200
+Subject: perf/x86: Avoid exposing wrong/stale data in intel_pmu_lbr_read_32()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit f2200ac311302fcdca6556fd0c5127eab6c65a3e upstream.
+
+When the perf_branch_entry::{in_tx,abort,cycles} fields were added,
+intel_pmu_lbr_read_32() wasn't updated to initialize them.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Fixes: 135c5612c460 ("perf/x86/intel: Support Haswell/v4 LBR format")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/lbr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct
+ cpuc->lbr_entries[i].to = msr_lastbranch.to;
+ cpuc->lbr_entries[i].mispred = 0;
+ cpuc->lbr_entries[i].predicted = 0;
++ cpuc->lbr_entries[i].in_tx = 0;
++ cpuc->lbr_entries[i].abort = 0;
++ cpuc->lbr_entries[i].cycles = 0;
+ cpuc->lbr_entries[i].reserved = 0;
+ }
+ cpuc->lbr_stack.nr = i;
cgroup-avoid-attaching-a-cgroup-root-to-two-different-superblocks.patch
cgroup-kthread-close-race-window-where-new-kthreads-can-be-migrated-to-non-root-cgroups.patch
+tcmu-fix-possible-overwrite-of-t_data_sg-s-last-iov.patch
+tcmu-fix-wrongly-calculating-of-the-base_command_size.patch
+tcmu-skip-data-out-blocks-before-gathering-data-in-buffer-for-bidi-case.patch
+thp-fix-madv_dontneed-vs.-madv_free-race.patch
+thp-fix-madv_dontneed-vs-clear-soft-dirty-race.patch
+zram-fix-operator-precedence-to-get-offset.patch
+zram-do-not-use-copy_page-with-non-page-aligned-address.patch
+zsmalloc-expand-class-bit.patch
+orangefs-free-superblock-when-mount-fails.patch
+drm-nouveau-mpeg-mthd-returns-true-on-success-now.patch
+drm-nouveau-mmu-nv4a-use-nv04-mmu-rather-than-the-nv44-one.patch
+drm-etnaviv-fix-missing-unlock-on-error-in-etnaviv_gpu_submit.patch
+cifs-reconnect-thread-reschedule-itself.patch
+cifs-store-results-of-cifs_reopen_file-to-avoid-infinite-wait.patch
+input-xpad-add-support-for-razer-wildcat-gamepad.patch
+perf-x86-avoid-exposing-wrong-stale-data-in-intel_pmu_lbr_read_32.patch
+x86-efi-don-t-try-to-reserve-runtime-regions.patch
+x86-signals-fix-lower-upper-bound-reporting-in-compat-siginfo.patch
+x86-pmem-fix-broken-__copy_user_nocache-cache-bypass-assumptions.patch
+x86-vdso-ensure-vdso32_enabled-gets-set-to-valid-values-only.patch
+x86-vdso-plug-race-between-mapping-and-elf-header-setup.patch
--- /dev/null
+From ab22d2604c86ceb01bb2725c9860b88a7dd383bb Mon Sep 17 00:00:00 2001
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Date: Mon, 27 Mar 2017 17:07:40 +0800
+Subject: tcmu: Fix possible overwrite of t_data_sg's last iov[]
+
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+
+commit ab22d2604c86ceb01bb2725c9860b88a7dd383bb upstream.
+
+If there is BIDI data, its first iov[] will overwrite the last
+iov[] for se_cmd->t_data_sg.
+
+To fix this, we can just increase the iov pointer, but this may
+introduce a new memory leakage bug: if se_cmd->data_length and
+se_cmd->t_bidi_data_sg->length are both not aligned up to
+DATA_BLOCK_SIZE, the actual length needed may be larger than just
+the sum of them.
+
+So, this can be avoided by rounding all the data lengths up
+to DATA_BLOCK_SIZE.
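+
+As a worked example (assuming DATA_BLOCK_SIZE is 4096): with
+se_cmd->data_length = 1000 and se_cmd->t_bidi_data_sg->length = 1000,
+the plain sum is only 2000 bytes, but each part still occupies a
+whole data block, so round_up(1000, 4096) + round_up(1000, 4096) =
+8192 bytes of the data area are actually needed.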
+
+Reviewed-by: Mike Christie <mchristi@redhat.com>
+Tested-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
+Reviewed-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_user.c | 34 +++++++++++++++++++++++-----------
+ 1 file changed, 23 insertions(+), 11 deletions(-)
+
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -389,6 +389,20 @@ static bool is_ring_space_avail(struct t
+ return true;
+ }
+
++static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
++{
++ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
++ size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
++
++ if (se_cmd->se_cmd_flags & SCF_BIDI) {
++ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
++ data_length += round_up(se_cmd->t_bidi_data_sg->length,
++ DATA_BLOCK_SIZE);
++ }
++
++ return data_length;
++}
++
+ static sense_reason_t
+ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ {
+@@ -402,7 +416,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcm
+ uint32_t cmd_head;
+ uint64_t cdb_off;
+ bool copy_to_data_area;
+- size_t data_length;
++ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+@@ -428,11 +442,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcm
+
+ mb = udev->mb_addr;
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+- data_length = se_cmd->data_length;
+- if (se_cmd->se_cmd_flags & SCF_BIDI) {
+- BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+- data_length += se_cmd->t_bidi_data_sg->length;
+- }
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+@@ -502,11 +511,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcm
+ entry->req.iov_dif_cnt = 0;
+
+ /* Handle BIDI commands */
+- iov_cnt = 0;
+- alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+- se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+- entry->req.iov_bidi_cnt = iov_cnt;
+-
++ if (se_cmd->se_cmd_flags & SCF_BIDI) {
++ iov_cnt = 0;
++ iov++;
++ alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
++ se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
++ false);
++ entry->req.iov_bidi_cnt = iov_cnt;
++ }
+ /* cmd's data_bitmap is what changed in process */
+ bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
+ DATA_BLOCK_BITS);
--- /dev/null
+From abe342a5b4b5aa579f6bf40ba73447c699e6b579 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Date: Mon, 27 Mar 2017 17:07:41 +0800
+Subject: tcmu: Fix wrongly calculating of the base_command_size
+
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+
+commit abe342a5b4b5aa579f6bf40ba73447c699e6b579 upstream.
+
+t_data_nents and t_bidi_data_nents are the numbers of scatterlist
+segments, but it cannot be assumed that the block size equals the
+segment size.
+
+In the worst case, all the blocks are discontiguous and the same
+number of iovecs is needed, that is to say: blocks == iovs.
+So just set the number of iovs to the block count needed by the
+tcmu cmd.
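+
+As a quick sanity check (assuming DATA_BLOCK_SIZE is 4096): an 8 KiB
+Data-Out buffer described by a single scatterlist entry has
+t_data_nents == 1, yet it spans two data blocks, which may be
+discontiguous in the data area and therefore need two iovecs.
+Sizing the iov[] array by the block count, 8192 / 4096 = 2, covers
+that worst case.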
+
+Tested-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
+Reviewed-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_user.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -403,6 +403,13 @@ static inline size_t tcmu_cmd_get_data_l
+ return data_length;
+ }
+
++static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
++{
++ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
++
++ return data_length / DATA_BLOCK_SIZE;
++}
++
+ static sense_reason_t
+ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ {
+@@ -430,8 +437,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcm
+ * expensive to tell how many regions are freed in the bitmap
+ */
+ base_command_size = max(offsetof(struct tcmu_cmd_entry,
+- req.iov[se_cmd->t_bidi_data_nents +
+- se_cmd->t_data_nents]),
++ req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
+ sizeof(struct tcmu_cmd_entry));
+ command_size = base_command_size
+ + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
--- /dev/null
+From a5d68ba85801a78c892a0eb8efb711e293ed314b Mon Sep 17 00:00:00 2001
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Date: Fri, 31 Mar 2017 10:35:25 +0800
+Subject: tcmu: Skip Data-Out blocks before gathering Data-In buffer for BIDI case
+
+From: Xiubo Li <lixiubo@cmss.chinamobile.com>
+
+commit a5d68ba85801a78c892a0eb8efb711e293ed314b upstream.
+
+For the bidirectional case, the Data-Out buffer blocks will always be
+at the head of the tcmu_cmd's bitmap, so before gathering the Data-In
+buffer the Data-Out ones must be skipped first, or devices supporting
+BIDI commands won't work.
+
+Fixed: 26418649eead ("target/user: Introduce data_bitmap, replace
+ data_length/data_head/data_tail")
+Reported-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
+Tested-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
+Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_user.c | 48 ++++++++++++++++++++++++++------------
+ 1 file changed, 33 insertions(+), 15 deletions(-)
+
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -306,24 +306,50 @@ static void free_data_area(struct tcmu_d
+ DATA_BLOCK_BITS);
+ }
+
+-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
+- struct scatterlist *data_sg, unsigned int data_nents)
++static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
++ bool bidi)
+ {
++ struct se_cmd *se_cmd = cmd->se_cmd;
+ int i, block;
+ int block_remaining = 0;
+ void *from, *to;
+ size_t copy_bytes, from_offset;
+- struct scatterlist *sg;
++ struct scatterlist *sg, *data_sg;
++ unsigned int data_nents;
++ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
++
++ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
++
++ if (!bidi) {
++ data_sg = se_cmd->t_data_sg;
++ data_nents = se_cmd->t_data_nents;
++ } else {
++ uint32_t count;
++
++ /*
++ * For bidi case, the first count blocks are for Data-Out
++ * buffer blocks, and before gathering the Data-In buffer
++ * the Data-Out buffer blocks should be discarded.
++ */
++ count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
++ while (count--) {
++ block = find_first_bit(bitmap, DATA_BLOCK_BITS);
++ clear_bit(block, bitmap);
++ }
++
++ data_sg = se_cmd->t_bidi_data_sg;
++ data_nents = se_cmd->t_bidi_data_nents;
++ }
+
+ for_each_sg(data_sg, sg, data_nents, i) {
+ int sg_remaining = sg->length;
+ to = kmap_atomic(sg_page(sg)) + sg->offset;
+ while (sg_remaining > 0) {
+ if (block_remaining == 0) {
+- block = find_first_bit(cmd_bitmap,
++ block = find_first_bit(bitmap,
+ DATA_BLOCK_BITS);
+ block_remaining = DATA_BLOCK_SIZE;
+- clear_bit(block, cmd_bitmap);
++ clear_bit(block, bitmap);
+ }
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
+@@ -600,19 +626,11 @@ static void tcmu_handle_completion(struc
+ se_cmd->scsi_sense_length);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+-
+ /* Get Data-In buffer before clean up */
+- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+- gather_data_area(udev, bitmap,
+- se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
++ gather_data_area(udev, cmd, true);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+-
+- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+- gather_data_area(udev, bitmap,
+- se_cmd->t_data_sg, se_cmd->t_data_nents);
++ gather_data_area(udev, cmd, false);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ free_data_area(udev, cmd);
--- /dev/null
+From 5b7abeae3af8c08c577e599dd0578b9e3ee6687b Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 13 Apr 2017 14:56:28 -0700
+Subject: thp: fix MADV_DONTNEED vs clear soft dirty race
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit 5b7abeae3af8c08c577e599dd0578b9e3ee6687b upstream.
+
+Yet another instance of the same race.
+
+Fix is identical to change_huge_pmd().
+
+See "thp: fix MADV_DONTNEED vs. numa balancing race" for more details.
+
+Link: http://lkml.kernel.org/r/20170302151034.27829-5-kirill.shutemov@linux.intel.com
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/task_mmu.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -899,7 +899,14 @@ static inline void clear_soft_dirty(stru
+ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+ {
+- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
++ pmd_t pmd = *pmdp;
++
++ /* See comment in change_huge_pmd() */
++ pmdp_invalidate(vma, addr, pmdp);
++ if (pmd_dirty(*pmdp))
++ pmd = pmd_mkdirty(pmd);
++ if (pmd_young(*pmdp))
++ pmd = pmd_mkyoung(pmd);
+
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_clear_soft_dirty(pmd);
--- /dev/null
+From 58ceeb6bec86d9140f9d91d71a710e963523d063 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 13 Apr 2017 14:56:26 -0700
+Subject: thp: fix MADV_DONTNEED vs. MADV_FREE race
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit 58ceeb6bec86d9140f9d91d71a710e963523d063 upstream.
+
+Both MADV_DONTNEED and MADV_FREE are handled with down_read(mmap_sem).
+
+It's critical not to clear the pmd intermittently while handling
+MADV_FREE, to avoid a race with MADV_DONTNEED:
+
+ CPU0: CPU1:
+ madvise_free_huge_pmd()
+ pmdp_huge_get_and_clear_full()
+madvise_dontneed()
+ zap_pmd_range()
+ pmd_trans_huge(*pmd) == 0 (without ptl)
+ // skip the pmd
+ set_pmd_at();
+ // pmd is re-established
+
+It results in MADV_DONTNEED skipping the pmd, leaving it not cleared.
+It violates the MADV_DONTNEED interface and can result in userspace
+misbehaviour.
+
+Basically it's the same race as with numa balancing in
+change_huge_pmd(), but a bit simpler to mitigate: we don't need to
+preserve dirty/young flags here due to MADV_FREE functionality.
+
+[kirill.shutemov@linux.intel.com: Urgh... Power is special again]
+ Link: http://lkml.kernel.org/r/20170303102636.bhd2zhtpds4mt62a@black.fi.intel.com
+Link: http://lkml.kernel.org/r/20170302151034.27829-4-kirill.shutemov@linux.intel.com
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1380,8 +1380,7 @@ bool madvise_free_huge_pmd(struct mmu_ga
+ deactivate_page(page);
+
+ if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+- orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+- tlb->fullmm);
++ pmdp_invalidate(vma, addr, pmd);
+ orig_pmd = pmd_mkold(orig_pmd);
+ orig_pmd = pmd_mkclean(orig_pmd);
+
--- /dev/null
+From 6f6266a561306e206e0e31a5038f029b6a7b1d89 Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Wed, 12 Apr 2017 16:27:19 +0100
+Subject: x86/efi: Don't try to reserve runtime regions
+
+From: Omar Sandoval <osandov@fb.com>
+
+commit 6f6266a561306e206e0e31a5038f029b6a7b1d89 upstream.
+
+Reserving a runtime region results in splitting the EFI memory
+descriptors for the runtime region. This results in runtime region
+descriptors with bogus memory mappings, leading to interesting crashes
+like the following during a kexec:
+
+ general protection fault: 0000 [#1] SMP
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.11.0-rc1 #53
+ Hardware name: Wiwynn Leopard-Orv2/Leopard-DDR BW, BIOS LBM05 09/30/2016
+ RIP: 0010:virt_efi_set_variable()
+ ...
+ Call Trace:
+ efi_delete_dummy_variable()
+ efi_enter_virtual_mode()
+ start_kernel()
+ ? set_init_arg()
+ x86_64_start_reservations()
+ x86_64_start_kernel()
+ start_cpu()
+ ...
+ Kernel panic - not syncing: Fatal exception
+
+Runtime regions will not be freed and do not need to be reserved, so
+skip the memmap modification in this case.
+
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Fixes: 8e80632fb23f ("efi/esrt: Use efi_mem_reserve() and avoid a kmalloc()")
+Link: http://lkml.kernel.org/r/20170412152719.9779-2-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/platform/efi/quirks.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_ad
+ return;
+ }
+
++ /* No need to reserve regions that will never be freed. */
++ if (md.attribute & EFI_MEMORY_RUNTIME)
++ return;
++
+ size += addr % EFI_PAGE_SIZE;
+ size = round_up(size, EFI_PAGE_SIZE);
+ addr = round_down(addr, EFI_PAGE_SIZE);
--- /dev/null
+From 11e63f6d920d6f2dfd3cd421e939a4aec9a58dcd Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 6 Apr 2017 09:04:31 -0700
+Subject: x86, pmem: fix broken __copy_user_nocache cache-bypass assumptions
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 11e63f6d920d6f2dfd3cd421e939a4aec9a58dcd upstream.
+
+Before we rework the "pmem api" to stop abusing __copy_user_nocache()
+for memcpy_to_pmem() we need to fix cases where we may strand dirty data
+in the cpu cache. The problem occurs when copy_from_iter_pmem() is used
+for arbitrary data transfers from userspace. There is no guarantee that
+these transfers, performed by dax_iomap_actor(), will have aligned
+destinations or aligned transfer lengths. Backstop the usage of
+__copy_user_nocache() with explicit cache management in these unaligned
+cases.
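+
+For instance (a hypothetical transfer, following the alignment rules
+spelled out in the comment added below): a 16-byte copy whose
+destination address ends in 0x4 is not 8-byte aligned, so
+__copy_user_nocache() stores part of it through the cache; without an
+explicit arch_wb_cache_pmem() of the affected line, that data can sit
+dirty in the CPU cache instead of being written back to pmem.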
+
+Yes, copy_from_iter_pmem() is now too big for an inline, but addressing
+that is saved for a later patch that moves the entirety of the "pmem
+api" into the pmem driver directly.
+
+Fixes: 5de490daec8b ("pmem: add copy_from_iter_pmem() and clear_pmem()")
+Cc: <x86@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Matthew Wilcox <mawilcox@microsoft.com>
+Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pmem.h | 42 +++++++++++++++++++++++++++++++-----------
+ 1 file changed, 31 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(
+ * @size: number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+- * instruction.
++ * instruction. Note that @size is internally rounded up to be cache
++ * line size aligned.
+ */
+ static inline void arch_wb_cache_pmem(void *addr, size_t size)
+ {
+@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(vo
+ clwb(p);
+ }
+
+-/*
+- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+- */
+-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+-{
+- return iter_is_iovec(i) == false;
+-}
+-
+ /**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter
+ /* TODO: skip the write-back by always using non-temporal stores */
+ len = copy_from_iter_nocache(addr, bytes, i);
+
+- if (__iter_needs_pmem_wb(i))
++ /*
++ * In the iovec case on x86_64 copy_from_iter_nocache() uses
++ * non-temporal stores for the bulk of the transfer, but we need
++ * to manually flush if the transfer is unaligned. A cached
++ * memory copy is used when destination or size is not naturally
++ * aligned. That is:
++ * - Require 8-byte alignment when size is 8 bytes or larger.
++ * - Require 4-byte alignment when size is 4 bytes.
++ *
++ * In the non-iovec case the entire destination needs to be
++ * flushed.
++ */
++ if (iter_is_iovec(i)) {
++ unsigned long flushed, dest = (unsigned long) addr;
++
++ if (bytes < 8) {
++ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
++ arch_wb_cache_pmem(addr, 1);
++ } else {
++ if (!IS_ALIGNED(dest, 8)) {
++ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
++ arch_wb_cache_pmem(addr, 1);
++ }
++
++ flushed = dest - (unsigned long) addr;
++ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
++ arch_wb_cache_pmem(addr + bytes - 1, 1);
++ }
++ } else
+ arch_wb_cache_pmem(addr, bytes);
+
+ return len;
--- /dev/null
+From cfac6dfa42bddfa9711b20d486e521d1a41ab09f Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 4 Apr 2017 18:15:01 +0200
+Subject: x86/signals: Fix lower/upper bound reporting in compat siginfo
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit cfac6dfa42bddfa9711b20d486e521d1a41ab09f upstream.
+
+Put the right values from the original siginfo into the
+userspace compat-siginfo.
+
+This fixes the 32-bit MPX "tabletest" testcase on 64-bit kernels.
+
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Dmitry Safonov <0x7f454c46@gmail.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: a4455082dc6f0 ('x86/signals: Add missing signal_compat code for x86 features')
+Link: http://lkml.kernel.org/r/1491322501-5054-1-git-send-email-joro@8bytes.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/signal_compat.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/signal_compat.c
++++ b/arch/x86/kernel/signal_compat.c
+@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_sigi
+
+ if (from->si_signo == SIGSEGV) {
+ if (from->si_code == SEGV_BNDERR) {
+- compat_uptr_t lower = (unsigned long)&to->si_lower;
+- compat_uptr_t upper = (unsigned long)&to->si_upper;
++ compat_uptr_t lower = (unsigned long)from->si_lower;
++ compat_uptr_t upper = (unsigned long)from->si_upper;
+ put_user_ex(lower, &to->si_lower);
+ put_user_ex(upper, &to->si_upper);
+ }
--- /dev/null
+From c06989da39cdb10604d572c8c7ea8c8c97f3c483 Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@googlemail.com>
+Date: Mon, 10 Apr 2017 17:14:27 +0200
+Subject: x86/vdso: Ensure vdso32_enabled gets set to valid values only
+
+From: Mathias Krause <minipli@googlemail.com>
+
+commit c06989da39cdb10604d572c8c7ea8c8c97f3c483 upstream.
+
+vdso_enabled can be set to arbitrary integer values via the kernel command
+line 'vdso32=' parameter or via 'sysctl abi.vsyscall32'.
+
+load_vdso32() only maps VDSO if vdso_enabled == 1, but ARCH_DLINFO_IA32
+merely checks for vdso_enabled != 0. As a consequence the AT_SYSINFO_EHDR
+auxiliary vector for the VDSO_ENTRY is emitted with a NULL pointer which
+causes a segfault when the application tries to use the VDSO.
+
+Restrict the valid arguments on the command line and the sysctl to 0 and 1.
+
+Fixes: b0b49f2673f0 ("x86, vdso: Remove compat vdso support")
+Signed-off-by: Mathias Krause <minipli@googlemail.com>
+Acked-by: Andy Lutomirski <luto@amacapital.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Roland McGrath <roland@redhat.com>
+Link: http://lkml.kernel.org/r/1491424561-7187-1-git-send-email-minipli@googlemail.com
+Link: http://lkml.kernel.org/r/20170410151723.518412863@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/vdso/vdso32-setup.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/entry/vdso/vdso32-setup.c
++++ b/arch/x86/entry/vdso/vdso32-setup.c
+@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
+ {
+ vdso32_enabled = simple_strtoul(s, NULL, 0);
+
+- if (vdso32_enabled > 1)
++ if (vdso32_enabled > 1) {
+ pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
++ vdso32_enabled = 0;
++ }
+
+ return 1;
+ }
+@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
+ /* Register vsyscall32 into the ABI table */
+ #include <linux/sysctl.h>
+
++static const int zero;
++static const int one = 1;
++
+ static struct ctl_table abi_table2[] = {
+ {
+ .procname = "vsyscall32",
+ .data = &vdso32_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = (int *)&zero,
++ .extra2 = (int *)&one,
+ },
+ {}
+ };
--- /dev/null
+From 6fdc6dd90272ce7e75d744f71535cfbd8d77da81 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 10 Apr 2017 17:14:28 +0200
+Subject: x86/vdso: Plug race between mapping and ELF header setup
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6fdc6dd90272ce7e75d744f71535cfbd8d77da81 upstream.
+
+The vsyscall32 sysctl can race against a concurrent fork when it switches
+from disabled to enabled:
+
+ arch_setup_additional_pages()
+ if (vdso32_enabled)
+ --> No mapping
+ sysctl.vsysscall32()
+ --> vdso32_enabled = true
+ create_elf_tables()
+ ARCH_DLINFO_IA32
+ if (vdso32_enabled) {
+ --> Add VDSO entry with NULL pointer
+
+Make ARCH_DLINFO_IA32 check whether the VDSO mapping has been set up for
+the newly forked process or not.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andy Lutomirski <luto@amacapital.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Mathias Krause <minipli@googlemail.com>
+Link: http://lkml.kernel.org/r/20170410151723.602367196@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/elf.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -278,7 +278,7 @@ struct task_struct;
+
+ #define ARCH_DLINFO_IA32 \
+ do { \
+- if (vdso32_enabled) { \
++ if (VDSO_CURRENT_BASE) { \
+ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
+ } \
--- /dev/null
+From d72e9a7a93e4f8e9e52491921d99e0c8aa89eb4e Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Thu, 13 Apr 2017 14:56:37 -0700
+Subject: zram: do not use copy_page with non-page aligned address
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit d72e9a7a93e4f8e9e52491921d99e0c8aa89eb4e upstream.
+
+copy_page() is a memcpy optimized for page-aligned addresses. If it
+is used with a non-page-aligned address, it can corrupt memory, which
+means system corruption. With zram, this can happen with:
+
+1. 64K architecture
+2. partial IO
+3. slub debug
+
+Partial IO needs to allocate a page, and zram allocates it via
+kmalloc. With slub debug, kmalloc(PAGE_SIZE) doesn't return a
+page-size aligned address. And finally, copy_page(mem, cmem)
+corrupts memory.
+
+So, this patch changes it to memcpy.
+
+Actually, we don't need to change the zram_bvec_write part because
+zsmalloc returns a page-aligned address in case of the PAGE_SIZE
+class, but it's not good to rely on the internals of zsmalloc.
+
+Note:
+ When this patch is merged to stable, clear_page should be fixed, too.
+ Unfortunately, recent zram removed it with the "same page merge"
+ feature, so it's hard to backport this patch to the -stable tree.
+
+I will handle it when I receive the mail from the stable tree
+maintainer about backporting this patch.
+
+Fixes: 42e99bd ("zram: optimize memory operations with clear_page()/copy_page()")
+Link: http://lkml.kernel.org/r/1492042622-12074-2-git-send-email-minchan@kernel.org
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/zram/zram_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -588,7 +588,7 @@ static int zram_decompress_page(struct z
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE) {
+- copy_page(mem, cmem);
++ memcpy(mem, cmem, PAGE_SIZE);
+ } else {
+ struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+
+@@ -780,7 +780,7 @@ compress_again:
+
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+- copy_page(cmem, src);
++ memcpy(cmem, src, PAGE_SIZE);
+ kunmap_atomic(src);
+ } else {
+ memcpy(cmem, src, clen);
--- /dev/null
+From 4ca82dabc9fbf7bc5322aa54d802cb3cb7b125c5 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Thu, 13 Apr 2017 14:56:35 -0700
+Subject: zram: fix operator precedence to get offset
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit 4ca82dabc9fbf7bc5322aa54d802cb3cb7b125c5 upstream.
+
+In zram_rw_page, the logic to get the offset is wrong due to operator
+precedence (i.e., "<<" has higher precedence than "&"). With the wrong
+offset, zram can corrupt the user's data. This patch fixes it.
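+
+A minimal illustration of the precedence problem (assuming 4K pages,
+so SECTORS_PER_PAGE == 8 and SECTOR_SHIFT == 9), for sector == 9:
+
+	/* intended: mask first, then convert sectors to a byte offset */
+	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+							/* (9 & 7) << 9 = 512 */
+
+	/* what the old code actually parsed as, since "<<" binds before "&" */
+	offset = sector & ((SECTORS_PER_PAGE - 1) << SECTOR_SHIFT);
+							/* 9 & 3584 = 0 */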
+
+Fixes: 8c7f01025 ("zram: implement rw_page operation of zram")
+Link: http://lkml.kernel.org/r/1492042622-12074-1-git-send-email-minchan@kernel.org
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/zram/zram_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -998,7 +998,7 @@ static int zram_rw_page(struct block_dev
+ }
+
+ index = sector >> SECTORS_PER_PAGE_SHIFT;
+- offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
++ offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+
+ bv.bv_page = page;
+ bv.bv_len = PAGE_SIZE;
--- /dev/null
+From 85d492f28d056c40629fc25d79f54da618a29dc4 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Thu, 13 Apr 2017 14:56:40 -0700
+Subject: zsmalloc: expand class bit
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit 85d492f28d056c40629fc25d79f54da618a29dc4 upstream.
+
+On a 64K page system, zsmalloc has 257 classes, so an 8-bit class
+field is not enough. With that, it corrupts the system when zsmalloc
+stores 65536-byte data (ie, class index 256), so this patch widens the
+class bits as a simple fix for stable backport. We should clean up
+this mess soon.
+
+ index size
+ 0 32
+ 1 288
+ ..
+ ..
+ 204 52256
+ 256 65536
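+
+The arithmetic behind the fix: an 8-bit field holds 2^8 = 256 distinct
+values (class indexes 0..255), so storing class index 256 silently
+truncates it. Widening the field to CLASS_BITS + 1 = 9 bits allows
+2^9 = 512 values, enough for all 257 classes.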
+
+Fixes: 3783689a1 ("zsmalloc: introduce zspage structure")
+Link: http://lkml.kernel.org/r/1492042622-12074-3-git-send-email-minchan@kernel.org
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/zsmalloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -280,7 +280,7 @@ struct zs_pool {
+ struct zspage {
+ struct {
+ unsigned int fullness:FULLNESS_BITS;
+- unsigned int class:CLASS_BITS;
++ unsigned int class:CLASS_BITS + 1;
+ unsigned int isolated:ISOLATED_BITS;
+ unsigned int magic:MAGIC_VAL_BITS;
+ };