--- /dev/null
+From a96dfddbcc04336bbed50dc2b24823e45e09e80c Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Fri, 3 Feb 2017 13:13:23 -0800
+Subject: base/memory, hotplug: fix a kernel oops in show_valid_zones()
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit a96dfddbcc04336bbed50dc2b24823e45e09e80c upstream.
+
+Reading a sysfs "memoryN/valid_zones" file leads to the following oops
+when the first page of a range is not backed by struct page.
+show_valid_zones() assumes that 'start_pfn' is always valid for
+page_zone().
+
+ BUG: unable to handle kernel paging request at ffffea017a000000
+ IP: show_valid_zones+0x6f/0x160
+
+This issue may happen on x86-64 systems with 64GiB or more memory, since
+their memory block size is bumped up to 2GiB. [1] An example of such a
+system is described below: 0x3240000000 is only aligned to 1GiB, so its
+memory block starts at 0x3200000000, which is not backed by struct page.
+
+ BIOS-e820: [mem 0x0000003240000000-0x000000603fffffff] usable
+
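+The arithmetic behind that example, as a minimal userspace sketch (the
+block size and start address are taken from the e820 line above; none of
+this is kernel code):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long block = 2ULL << 30;		/* 2GiB memory block */
+		unsigned long long start = 0x3240000000ULL;	/* usable range start */
+
+		/* Round down to the memory block containing 'start'. */
+		unsigned long long block_start = start & ~(block - 1);
+
+		/* Prints 0x3200000000: the 1GiB below 'start' lies inside the
+		 * block but is not backed by struct page. */
+		printf("block starts at 0x%llx\n", block_start);
+		return 0;
+	}
+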
+Since test_pages_in_a_zone() already checks holes, fix this issue by
+extending this function to return 'valid_start' and 'valid_end' for a
+given range. show_valid_zones() then proceeds with the valid range.
+
+[1] 'Commit bdee237c0343 ("x86: mm: Use 2GB memory block size on
+ large-memory x86-64 systems")'
+
+Link: http://lkml.kernel.org/r/20170127222149.30893-3-toshi.kani@hpe.com
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Zhang Zhen <zhenzhang.zhang@huawei.com>
+Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/memory.c | 12 ++++++------
+ include/linux/memory_hotplug.h | 3 ++-
+ mm/memory_hotplug.c | 20 +++++++++++++++-----
+ 3 files changed, 23 insertions(+), 12 deletions(-)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -391,33 +391,33 @@ static ssize_t show_valid_zones(struct d
+ {
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn, end_pfn;
++ unsigned long valid_start, valid_end, valid_pages;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+- struct page *first_page;
+ struct zone *zone;
+ int zone_shift = 0;
+
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ end_pfn = start_pfn + nr_pages;
+- first_page = pfn_to_page(start_pfn);
+
+ /* The block contains more than one zone can not be offlined. */
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return sprintf(buf, "none\n");
+
+- zone = page_zone(first_page);
++ zone = page_zone(pfn_to_page(valid_start));
++ valid_pages = valid_end - valid_start;
+
+ /* MMOP_ONLINE_KEEP */
+ sprintf(buf, "%s", zone->name);
+
+ /* MMOP_ONLINE_KERNEL */
+- zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+ }
+
+ /* MMOP_ONLINE_MOVABLE */
+- zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct z
+ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+ /* VM interface that may be used by firmware interface */
+ extern int online_pages(unsigned long, unsigned long, int);
+-extern int test_pages_in_a_zone(unsigned long, unsigned long);
++extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end);
+ extern void __offline_isolated_pages(unsigned long, unsigned long);
+
+ typedef void (*online_page_callback_t)(struct page *page);
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1484,10 +1484,13 @@ bool is_mem_section_removable(unsigned l
+
+ /*
+ * Confirm all pages in a range [start, end) belong to the same zone.
++ * When true, return its valid [start, end).
+ */
+-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
++int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end)
+ {
+ unsigned long pfn, sec_end_pfn;
++ unsigned long start, end;
+ struct zone *zone = NULL;
+ struct page *page;
+ int i;
+@@ -1509,14 +1512,20 @@ int test_pages_in_a_zone(unsigned long s
+ page = pfn_to_page(pfn + i);
+ if (zone && page_zone(page) != zone)
+ return 0;
++ if (!zone)
++ start = pfn + i;
+ zone = page_zone(page);
++ end = pfn + MAX_ORDER_NR_PAGES;
+ }
+ }
+
+- if (zone)
++ if (zone) {
++ *valid_start = start;
++ *valid_end = end;
+ return 1;
+- else
++ } else {
+ return 0;
++ }
+ }
+
+ /*
+@@ -1863,6 +1872,7 @@ static int __ref __offline_pages(unsigne
+ long offlined_pages;
+ int ret, drain, retry_max, node;
+ unsigned long flags;
++ unsigned long valid_start, valid_end;
+ struct zone *zone;
+ struct memory_notify arg;
+
+@@ -1873,10 +1883,10 @@ static int __ref __offline_pages(unsigne
+ return -EINVAL;
+ /* This makes hotplug much easier...and readable.
+ we assume this for now. .*/
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return -EINVAL;
+
+- zone = page_zone(pfn_to_page(start_pfn));
++ zone = page_zone(pfn_to_page(valid_start));
+ node = zone_to_nid(zone);
+ nr_pages = end_pfn - start_pfn;
+
--- /dev/null
+From 81ddd8c0c5e1cb41184d66567140cb48c53eb3d1 Mon Sep 17 00:00:00 2001
+From: Rabin Vincent <rabinv@axis.com>
+Date: Fri, 13 Jan 2017 15:00:16 +0100
+Subject: cifs: initialize file_info_lock
+
+From: Rabin Vincent <rabinv@axis.com>
+
+commit 81ddd8c0c5e1cb41184d66567140cb48c53eb3d1 upstream.
+
+file_info_lock is not initialized in initiate_cifs_search(), leading to
+the following splat after a simple "mount.cifs ... dir && ls dir/":
+
+ BUG: spinlock bad magic on CPU#0, ls/486
+ lock: 0xffff880009301110, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
+ CPU: 0 PID: 486 Comm: ls Not tainted 4.9.0 #27
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+ ffffc900042f3db0 ffffffff81327533 0000000000000000 ffff880009301110
+ ffffc900042f3dd0 ffffffff810baf75 ffff880009301110 ffffffff817ae077
+ ffffc900042f3df0 ffffffff810baff6 ffff880009301110 ffff880008d69900
+ Call Trace:
+ [<ffffffff81327533>] dump_stack+0x65/0x92
+ [<ffffffff810baf75>] spin_dump+0x85/0xe0
+ [<ffffffff810baff6>] spin_bug+0x26/0x30
+ [<ffffffff810bb159>] do_raw_spin_lock+0xe9/0x130
+ [<ffffffff8159ad2f>] _raw_spin_lock+0x1f/0x30
+ [<ffffffff8127e50d>] cifs_closedir+0x4d/0x100
+ [<ffffffff81181cfd>] __fput+0x5d/0x160
+ [<ffffffff81181e3e>] ____fput+0xe/0x10
+ [<ffffffff8109410e>] task_work_run+0x7e/0xa0
+ [<ffffffff81002512>] exit_to_usermode_loop+0x92/0xa0
+ [<ffffffff810026f9>] syscall_return_slowpath+0x49/0x50
+ [<ffffffff8159b484>] entry_SYSCALL_64_fastpath+0xa7/0xa9
+
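+The ".magic: 00000000" above is the tell: with CONFIG_DEBUG_SPINLOCK,
+spin_lock_init() stores a magic value in the lock, and this kzalloc()ed
+cifsFileInfo never got one. The pattern the one-line fix below restores,
+as a short kernel-style sketch (the allocation mirrors the
+initiate_cifs_search() hunk, abridged):
+
+	cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+	if (!cifsFile) {
+		rc = -ENOMEM;
+		goto error_exit;
+	}
+	/* Must happen before the first spin_lock(&cifsFile->file_info_lock),
+	 * e.g. the one taken later by cifs_closedir(). */
+	spin_lock_init(&cifsFile->file_info_lock);
+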
+Fixes: 3afca265b5f53a0 ("Clarify locking of cifs file and tcon structures and make more granular")
+Signed-off-by: Rabin Vincent <rabinv@axis.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/readdir.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int
+ rc = -ENOMEM;
+ goto error_exit;
+ }
++ spin_lock_init(&cifsFile->file_info_lock);
+ file->private_data = cifsFile;
+ cifsFile->tlink = cifs_get_tlink(tlink);
+ tcon = tlink_tcon(tlink);
--- /dev/null
+From 5abf186a30a89d5b9c18a6bf93a2c192c9fd52f6 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Fri, 3 Feb 2017 13:13:29 -0800
+Subject: mm, fs: check for fatal signals in do_generic_file_read()
+
+From: Michal Hocko <mhocko@suse.com>
+
+commit 5abf186a30a89d5b9c18a6bf93a2c192c9fd52f6 upstream.
+
+do_generic_file_read() can be told to perform a large request from
+userspace. If the system is under OOM and the reading task is the OOM
+victim, then it has access to memory reserves, and finishing the full
+request can deplete memory entirely, which is dangerous. Prefer a short
+read instead and allow the killed task to terminate.
+
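+The shape of the resulting read loop, as a minimal sketch (everything
+except fatal_signal_pending() and current is an illustrative stand-in,
+not the real do_generic_file_read() internals):
+
+	while (copied < requested) {
+		/* The task is dying (e.g. OOM-killed): return a short read
+		 * instead of pulling more pages into the page cache. */
+		if (fatal_signal_pending(current)) {
+			error = -EINTR;
+			break;
+		}
+		copied += copy_one_page();	/* illustrative helper */
+	}
+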
+Link: http://lkml.kernel.org/r/20170201092706.9966-3-mhocko@kernel.org
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/filemap.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1703,6 +1703,11 @@ static ssize_t do_generic_file_read(stru
+
+ cond_resched();
+ find_page:
++ if (fatal_signal_pending(current)) {
++ error = -EINTR;
++ goto out;
++ }
++
+ page = find_get_page(mapping, index);
+ if (!page) {
+ page_cache_sync_readahead(mapping,
--- /dev/null
+From deb88a2a19e85842d79ba96b05031739ec327ff4 Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Fri, 3 Feb 2017 13:13:20 -0800
+Subject: mm/memory_hotplug.c: check start_pfn in test_pages_in_a_zone()
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit deb88a2a19e85842d79ba96b05031739ec327ff4 upstream.
+
+Patch series "fix a kernel oops when reading sysfs valid_zones", v2.
+
+A sysfs memory file is created for each 2GiB memory block on x86-64 when
+the system has 64GiB or more memory. [1] When the start address of a
+memory block is not backed by struct page, i.e. the memory range is not
+aligned to 2GiB, reading its 'valid_zones' attribute file leads to a
+kernel oops. This issue was observed on multiple x86-64 systems with
+more than 64GiB of memory. This patch-set fixes this issue.
+
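+Reading the attribute is all it takes to hit the oops; a minimal
+userspace reproducer sketch (the block number is illustrative, any block
+whose start address is not backed by struct page will do):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char buf[64];
+		FILE *f = fopen(
+			"/sys/devices/system/memory/memory100/valid_zones", "r");
+
+		if (f) {
+			if (fgets(buf, sizeof(buf), f))
+				printf("%s", buf);	/* e.g. "Normal" */
+			fclose(f);
+		}
+		return 0;
+	}
+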
+Patch 1 first fixes an issue in test_pages_in_a_zone(), which does not
+test the start section.
+
+Patch 2 then fixes the kernel oops by extending test_pages_in_a_zone()
+to return valid [start, end).
+
+Note for stable kernels: The memory block size change was made by commit
+bdee237c0343 ("x86: mm: Use 2GB memory block size on large-memory x86-64
+systems"), which was accepted to 3.9. However, this patch-set depends
+on (and fixes) the change to test_pages_in_a_zone() made by commit
+5f0f2887f4de ("mm/memory_hotplug.c: check for missing sections in
+test_pages_in_a_zone()"), which was accepted into 4.4.
+
+So, I recommend backporting this series to stable kernels down to 4.4.
+
+[1] 'Commit bdee237c0343 ("x86: mm: Use 2GB memory block size on
+ large-memory x86-64 systems")'
+
+This patch (of 2):
+
+test_pages_in_a_zone() does not check 'start_pfn' when it is aligned to
+a section boundary, because 'sec_end_pfn' is then set equal to 'pfn' and
+the inner loop runs zero times (see the sketch below). Since this
+function is called to test the range of a sysfs memory file, 'start_pfn'
+is always section-aligned, so the first section is never checked.
+
+Fix it by properly setting 'sec_end_pfn' to the next section pfn.
+
+Also make sure that this function returns 1 only when the range belongs
+to a zone.
+
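+The boundary case can be seen from the alignment macro alone; a minimal
+sketch (PAGES_PER_SECTION uses the x86-64 value and SECTION_ALIGN_UP
+mirrors the kernel's definition):
+
+	#include <stdio.h>
+
+	#define PAGES_PER_SECTION 0x8000UL	/* x86-64: 128MiB sections */
+	#define SECTION_ALIGN_UP(pfn) \
+		(((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))
+
+	int main(void)
+	{
+		unsigned long start_pfn = 0x8000UL;	/* section-aligned */
+
+		/* Old code: sec_end_pfn == start_pfn, so the first section's
+		 * inner loop 'pfn < sec_end_pfn' runs zero times. */
+		printf("0x%lx\n", SECTION_ALIGN_UP(start_pfn));	/* 0x8000 */
+
+		/* Fixed code: align start_pfn + 1 up, giving the end of the
+		 * first section, so its pages are actually tested. */
+		printf("0x%lx\n", SECTION_ALIGN_UP(start_pfn + 1));	/* 0x10000 */
+		return 0;
+	}
+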
+Link: http://lkml.kernel.org/r/20170127222149.30893-2-toshi.kani@hpe.com
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Banman <abanman@sgi.com>
+Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
+Cc: Greg KH <greg@kroah.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory_hotplug.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1483,7 +1483,7 @@ bool is_mem_section_removable(unsigned l
+ }
+
+ /*
+- * Confirm all pages in a range [start, end) is belongs to the same zone.
++ * Confirm all pages in a range [start, end) belong to the same zone.
+ */
+ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ {
+@@ -1491,9 +1491,9 @@ int test_pages_in_a_zone(unsigned long s
+ struct zone *zone = NULL;
+ struct page *page;
+ int i;
+- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+ pfn < end_pfn;
+- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
+ /* Make sure the memory section is present first */
+ if (!present_section_nr(pfn_to_section_nr(pfn)))
+ continue;
+@@ -1512,7 +1512,11 @@ int test_pages_in_a_zone(unsigned long s
+ zone = page_zone(page);
+ }
+ }
+- return 1;
++
++ if (zone)
++ return 1;
++ else
++ return 0;
+ }
+
+ /*
nfsd-fix-a-null-reference-case-in-find_or_create_lock_stateid.patch
svcrpc-fix-oops-in-absence-of-krb5-module.patch
zswap-disable-changing-params-if-init-fails.patch
+cifs-initialize-file_info_lock.patch
+mm-memory_hotplug.c-check-start_pfn-in-test_pages_in_a_zone.patch
+base-memory-hotplug-fix-a-kernel-oops-in-show_valid_zones.patch
+mm-fs-check-for-fatal-signals-in-do_generic_file_read.patch