--- /dev/null
+From 4fee9f364b9b99f76732f2a6fd6df679a237fa74 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 16 Nov 2015 11:18:14 +0100
+Subject: arm64: mm: use correct mapping granularity under DEBUG_RODATA
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 4fee9f364b9b99f76732f2a6fd6df679a237fa74 upstream.
+
+When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
+and resides at an offset that is not a multiple of 512 MB, the rounding
+that occurs in __map_memblock() and fixup_executable() results in
+incorrect regions being mapped.
+
+The following snippet from /sys/kernel/debug/kernel_page_tables shows
+how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
+the first 2 MB of memory (which may be inaccessible from non-secure EL1
+or just reserved by the firmware) is inadvertently mapped into the end of
+the module region.
+
+ ---[ Modules start ]---
+ 0xfffffdffffe00000-0xfffffe0000000000 2M RW NX ... UXN MEM/NORMAL
+ ---[ Modules end ]---
+ ---[ Kernel Mapping ]---
+ 0xfffffe0000000000-0xfffffe0000090000 576K RW NX ... UXN MEM/NORMAL
+ 0xfffffe0000090000-0xfffffe0000200000 1472K ro x ... UXN MEM/NORMAL
+ 0xfffffe0000200000-0xfffffe0000800000 6M ro x ... UXN MEM/NORMAL
+ 0xfffffe0000800000-0xfffffe0000810000 64K ro x ... UXN MEM/NORMAL
+ 0xfffffe0000810000-0xfffffe0000a00000 1984K RW NX ... UXN MEM/NORMAL
+ 0xfffffe0000a00000-0xfffffe00ffe00000 4084M RW NX ... UXN MEM/NORMAL
+
+The same issue is likely to occur on 16k pages kernels whose load
+address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
+SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
+
+Fixes: da141706aea5 ("arm64: add better page protections to arm64")
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ard.biesheuvel: add #define of SWAPPER_BLOCK_SIZE for -stable version]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -301,6 +301,7 @@ static void create_mapping_late(phys_add
+ }
+
+ #ifdef CONFIG_DEBUG_RODATA
++#define SWAPPER_BLOCK_SIZE (PAGE_SHIFT == 12 ? SECTION_SIZE : PAGE_SIZE)
+ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+ {
+ /*
+@@ -308,8 +309,8 @@ static void __init __map_memblock(phys_a
+ * for now. This will get more fine grained later once all memory
+ * is mapped
+ */
+- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
++ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+@@ -397,18 +398,18 @@ void __init fixup_executable(void)
+ {
+ #ifdef CONFIG_DEBUG_RODATA
+ /* now that we are actually fully mapped, make the start/end more fine grained */
+- if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
++ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+- SECTION_SIZE);
++ SWAPPER_BLOCK_SIZE);
+
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL);
+ }
+
+- if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
++ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+- SECTION_SIZE);
++ SWAPPER_BLOCK_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL);
--- /dev/null
+From tadeusz.struk@intel.com Sun Feb 14 12:56:09 2016
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Wed, 13 Jan 2016 20:57:40 -0800
+Subject: crypto: fix test vector for rsa
+To: Greg KH <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Linux Crypto Mailing List <linux-crypto@vger.kernel.org>, Linux Kernel Developers List <linux-kernel@vger.kernel.org>, David Howells <dhowells@redhat.com>
+Message-ID: <56972AC4.4010501@intel.com>
+
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+
+After the fix to the asn1_decoder in commit: 0d62e9dd
+"ASN.1: Fix non-match detection failure on data overrun"
+the rsa algorithm is failing to register in 4.3 stable kernels with
+error: "alg: rsa: test failed on vector 4, err=-74"
+
+This happens because the asn1 definition for the rsa key that has been
+added in 4.2 defined all 3 components of the key as non-optional, as
+the asn1_decoder before the fix was working fine for both the private
+and public keys.
+
+This patch adds the missing (fake) component to one key vector to allow
+the algorithm to successfully register and be used with valid private
+keys later. This is only to make the asn1_decoder successfully parse the
+key and the fake component is never used in the test as the vector is
+marked as public key.
+
+This patch applies only to 4.3 kernels as the 4.2 version of asn1_decoder
+works fine with the asn1 definition.
+4.4 is also ok because the akcipher interface has been changed, and
+the set_key function has been split into set_public_key and set_priv_key
+and there are two separate asn1 definitions for the two key formats
+with all the required components correctly defined (commit 22287b0).
+
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+---
+ crypto/testmgr.h | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -270,7 +270,7 @@ static struct akcipher_testvec rsa_tv_te
+ .c_size = 256,
+ }, {
+ .key =
+- "\x30\x82\x01\x09" /* sequence of 265 bytes */
++ "\x30\x82\x01\x0C" /* sequence of 268 bytes */
+ "\x02\x82\x01\x00" /* modulus - integer of 256 bytes */
+ "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D"
+ "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA"
+@@ -288,8 +288,9 @@ static struct akcipher_testvec rsa_tv_te
+ "\x55\xE6\x29\x69\xD1\xC2\xE8\xB9\x78\x59\xF6\x79\x10\xC6\x4E\xEB"
+ "\x6A\x5E\xB9\x9A\xC7\xC4\x5B\x63\xDA\xA3\x3F\x5E\x92\x7A\x81\x5E"
+ "\xD6\xB0\xE2\x62\x8F\x74\x26\xC2\x0C\xD3\x9A\x17\x47\xE6\x8E\xAB"
+- "\x02\x03\x01\x00\x01", /* public key - integer of 3 bytes */
+- .key_len = 269,
++ "\x02\x03\x01\x00\x01" /* public key - integer of 3 bytes */
++ "\x02\x01\x00", /* private key - integer of 1 byte */
++ .key_len = 272,
+ .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a",
+ .c =
+ "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe"
--- /dev/null
+From ef83b6e8f40bb24b92ad73b5889732346e54a793 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 29 Sep 2015 15:48:11 -0400
+Subject: ext2, ext4: warn when mounting with dax enabled
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit ef83b6e8f40bb24b92ad73b5889732346e54a793 upstream.
+
+Similar to XFS warn when mounting DAX while it is still considered under
+development. Also, aspects of the DAX implementation, for example
+synchronization against multiple faults and faults causing block
+allocation, depend on the correct implementation in the filesystem. The
+maturity of a given DAX implementation is filesystem specific.
+
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Matthew Wilcox <willy@linux.intel.com>
+Cc: linux-ext4@vger.kernel.org
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Jan Kara <jack@suse.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext2/super.c | 2 ++
+ fs/ext4/super.c | 6 +++++-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -566,6 +566,8 @@ static int parse_options(char *options,
+ /* Fall through */
+ case Opt_dax:
+ #ifdef CONFIG_FS_DAX
++ ext2_msg(sb, KERN_WARNING,
++ "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+ set_opt(sbi->s_mount_opt, DAX);
+ #else
+ ext2_msg(sb, KERN_INFO, "dax option not supported");
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super
+ }
+ sbi->s_jquota_fmt = m->mount_opt;
+ #endif
+-#ifndef CONFIG_FS_DAX
+ } else if (token == Opt_dax) {
++#ifdef CONFIG_FS_DAX
++ ext4_msg(sb, KERN_WARNING,
++ "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
++ sbi->s_mount_opt |= m->mount_opt;
++#else
+ ext4_msg(sb, KERN_INFO, "dax option not supported");
+ return -1;
+ #endif
--- /dev/null
+From 5a1c7f47da9b32d0671e776b0f388095b7f91e2e Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@ZenIV.linux.org.uk>
+Date: Thu, 26 Nov 2015 15:20:50 -0500
+Subject: ext4: fix an endianness bug in ext4_encrypted_follow_link()
+
+From: Al Viro <viro@ZenIV.linux.org.uk>
+
+commit 5a1c7f47da9b32d0671e776b0f388095b7f91e2e upstream.
+
+applying le32_to_cpu() to 16bit value is a bad idea...
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/symlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/symlink.c
++++ b/fs/ext4/symlink.c
+@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow
+ /* Symlink is encrypted */
+ sd = (struct ext4_encrypted_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
+- cstr.len = le32_to_cpu(sd->len);
++ cstr.len = le16_to_cpu(sd->len);
+ if ((cstr.len +
+ sizeof(struct ext4_encrypted_symlink_data) - 1) >
+ max_size) {
--- /dev/null
+From e2c9e0b28e146c9a3bce21408f3c02e24ac7ac31 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@ZenIV.linux.org.uk>
+Date: Thu, 26 Nov 2015 15:20:19 -0500
+Subject: ext4: fix an endianness bug in ext4_encrypted_zeroout()
+
+From: Al Viro <viro@ZenIV.linux.org.uk>
+
+commit e2c9e0b28e146c9a3bce21408f3c02e24ac7ac31 upstream.
+
+ex->ee_block is not host-endian (note that accesses of other fields
+of *ex right next to that line go through the helpers that do proper
+conversion from little-endian to host-endian; it might make sense
+to add similar for ->ee_block to avoid reintroducing that kind of
+bugs...)
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/crypto.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -408,7 +408,7 @@ int ext4_encrypted_zeroout(struct inode
+ struct ext4_crypto_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+- ext4_lblk_t lblk = ex->ee_block;
++ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+ ext4_fsblk_t pblk = ext4_ext_pblock(ex);
+ unsigned int len = ext4_ext_get_actual_len(ex);
+ int ret, err = 0;
--- /dev/null
+From a4dad1ae24f850410c4e60f22823cba1289b8d52 Mon Sep 17 00:00:00 2001
+From: David Turner <novalis@novalis.org>
+Date: Tue, 24 Nov 2015 14:34:37 -0500
+Subject: ext4: Fix handling of extended tv_sec
+
+From: David Turner <novalis@novalis.org>
+
+commit a4dad1ae24f850410c4e60f22823cba1289b8d52 upstream.
+
+In ext4, the bottom two bits of {a,c,m}time_extra are used to extend
+the {a,c,m}time fields, deferring the year 2038 problem to the year
+2446.
+
+When decoding these extended fields, for times whose bottom 32 bits
+would represent a negative number, sign extension causes the 64-bit
+extended timestamp to be negative as well, which is not what's
+intended. This patch corrects that issue, so that the only negative
+{a,c,m}times are those between 1901 and 1970 (as per 32-bit signed
+timestamps).
+
+Some older kernels might have written pre-1970 dates with 1,1 in the
+extra bits. This patch treats those incorrectly-encoded dates as
+pre-1970, instead of post-2311, until kernel 4.20 is released.
+Hopefully by then e2fsck will have fixed up the bad data.
+
+Also add a comment explaining the encoding of ext4's extra {a,c,m}time
+bits.
+
+Signed-off-by: David Turner <novalis@novalis.org>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reported-by: Mark Harris <mh8928@yahoo.com>
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=23732
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4.h | 51 ++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 44 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -26,6 +26,7 @@
+ #include <linux/seqlock.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
++#include <linux/version.h>
+ #include <linux/wait.h>
+ #include <linux/blockgroup_lock.h>
+ #include <linux/percpu_counter.h>
+@@ -723,19 +724,55 @@ struct move_extent {
+ <= (EXT4_GOOD_OLD_INODE_SIZE + \
+ (einode)->i_extra_isize)) \
+
++/*
++ * We use an encoding that preserves the times for extra epoch "00":
++ *
++ * extra msb of adjust for signed
++ * epoch 32-bit 32-bit tv_sec to
++ * bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range
++ * 0 0 1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31
++ * 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19
++ * 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07
++ * 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25
++ * 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16
++ * 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04
++ * 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22
++ * 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10
++ *
++ * Note that previous versions of the kernel on 64-bit systems would
++ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
++ * 1970. e2fsck will correct this, assuming that it is run on the
++ * affected filesystem before 2242.
++ */
++
+ static inline __le32 ext4_encode_extra_time(struct timespec *time)
+ {
+- return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
+- (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
+- ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
++ u32 extra = sizeof(time->tv_sec) > 4 ?
++ ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
++ return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
+ }
+
+ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+ {
+- if (sizeof(time->tv_sec) > 4)
+- time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
+- << 32;
+- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
++ if (unlikely(sizeof(time->tv_sec) > 4 &&
++ (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
++ /* Handle legacy encoding of pre-1970 dates with epoch
++ * bits 1,1. We assume that by kernel version 4.20,
++ * everyone will have run fsck over the affected
++ * filesystems to correct the problem. (This
++ * backwards compatibility may be removed before this
++ * time, at the discretion of the ext4 developers.)
++ */
++ u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
++ if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
++ extra_bits = 0;
++ time->tv_sec += extra_bits << 32;
++#else
++ time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
++#endif
++ }
++ time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ }
+
+ #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
--- /dev/null
+From 12c2ab09571e8aae3a87da2a4a452632a5fac1e5 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 15 Dec 2015 16:08:12 +0000
+Subject: iommu/io-pgtable-arm: Ensure we free the final level on teardown
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 12c2ab09571e8aae3a87da2a4a452632a5fac1e5 upstream.
+
+When tearing down page tables, we return early for the final level
+since we know that we won't have any table pointers to follow.
+Unfortunately, this also means that we forget to free the final level,
+so we end up leaking memory.
+
+Fix the issue by always freeing the current level, but just don't bother
+to iterate over the ptes if we're at the final level.
+
+Reported-by: Zhang Bo <zhangbo_a@xiaomi.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/io-pgtable-arm.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -405,17 +405,18 @@ static void __arm_lpae_free_pgtable(stru
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+- /* Only leaf entries at the last level */
+- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+- return;
+-
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+- end = (void *)ptep + table_size;
++
++ /* Only leaf entries at the last level */
++ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
++ end = ptep;
++ else
++ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
--- /dev/null
+From 9c03ee147193645be4c186d3688232fa438c57c7 Mon Sep 17 00:00:00 2001
+From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Date: Sat, 16 Jan 2016 00:31:23 +0530
+Subject: sched: Fix crash in sched_init_numa()
+
+From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+
+commit 9c03ee147193645be4c186d3688232fa438c57c7 upstream.
+
+The following PowerPC commit:
+
+ c118baf80256 ("arch/powerpc/mm/numa.c: do not allocate bootmem memory for non existing nodes")
+
+avoids allocating bootmem memory for non existent nodes.
+
+But when DEBUG_PER_CPU_MAPS=y is enabled, my powerNV system failed to boot
+because in sched_init_numa(), cpumask_or() operation was done on
+unallocated nodes.
+
+Fix that by making cpumask_or() operation only on existing nodes.
+
+[ Tested with and w/o DEBUG_PER_CPU_MAPS=y on x86 and PowerPC. ]
+
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Tested-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Cc: <gkurz@linux.vnet.ibm.com>
+Cc: <grant.likely@linaro.org>
+Cc: <nikunj@linux.vnet.ibm.com>
+Cc: <vdavydov@parallels.com>
+Cc: <linuxppc-dev@lists.ozlabs.org>
+Cc: <linux-mm@kvack.org>
+Cc: <peterz@infradead.org>
+Cc: <benh@kernel.crashing.org>
+Cc: <paulus@samba.org>
+Cc: <mpe@ellerman.id.au>
+Cc: <anton@samba.org>
+Link: http://lkml.kernel.org/r/1452884483-11676-1-git-send-email-raghavendra.kt@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6678,7 +6678,7 @@ static void sched_init_numa(void)
+
+ sched_domains_numa_masks[i][j] = mask;
+
+- for (k = 0; k < nr_node_ids; k++) {
++ for_each_node(k) {
+ if (node_distance(j, k) > sched_domains_numa_distance[i])
+ continue;
+
tty-fix-gpf-in-flush_to_ldisc.patch
tty-retry-failed-reopen-if-tty-teardown-in-progress.patch
tty-fix-unsafe-ldisc-reference-via-ioctl-tiocgetd.patch
+iommu-io-pgtable-arm-ensure-we-free-the-final-level-on-teardown.patch
+arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch
+xhci-fix-usb2-resume-timing-and-races.patch
+crypto-fix-test-vector-for-rsa.patch
+ext2-ext4-warn-when-mounting-with-dax-enabled.patch
+ext4-fix-handling-of-extended-tv_sec.patch
+ext4-fix-an-endianness-bug-in-ext4_encrypted_zeroout.patch
+ext4-fix-an-endianness-bug-in-ext4_encrypted_follow_link.patch
+sched-fix-crash-in-sched_init_numa.patch
--- /dev/null
+From f69115fdbc1ac0718e7d19ad3caa3da2ecfe1c96 Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Fri, 11 Dec 2015 14:38:06 +0200
+Subject: xhci: fix usb2 resume timing and races.
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit f69115fdbc1ac0718e7d19ad3caa3da2ecfe1c96 upstream.
+
+According to USB 2 specs ports need to signal resume for at least 20ms,
+in practice even longer, before moving to U0 state.
+Both host and devices can initiate resume.
+
+On device initiated resume, a port status interrupt with the port in resume
+state is issued. The interrupt handler tags a resume_done[port]
+timestamp with current time + USB_RESUME_TIMEOUT, and kick roothub timer.
+Root hub timer requests for port status, finds the port in resume state,
+checks if resume_done[port] timestamp passed, and set port to U0 state.
+
+On host initiated resume, current code sets the port to resume state,
+sleep 20ms, and finally sets the port to U0 state. This should also
+be changed to work in a similar way as the device initiated resume, with
+timestamp tagging, but that is not yet tested and will be a separate
+fix later.
+
+There are a few issues with this approach
+
+1. A host initiated resume will also generate a resume event. The event
+ handler will find the port in resume state, believe it's a device
+ initiated resume, and act accordingly.
+
+2. A port status request might cut the resume signalling short if a
+ get_port_status request is handled during the host resume signalling.
+ The port will be found in resume state. The timestamp is not set leading
+ to time_after_eq(jiffies, timestamp) returning true, as timestamp = 0.
+ get_port_status will proceed with moving the port to U0.
+
+3. If an error, or anything else happens to the port during device
+ initiated resume signalling it will leave all the device resume
+ parameters hanging uncleared, preventing further suspend, returning
+ -EBUSY, and cause the pm thread to busyloop trying to enter suspend.
+
+Fix this by using the existing resuming_ports bitfield to indicate that
+resume signalling timing is taken care of.
+Check if the resume_done[port] is set before using it for timestamp
+comparison, and also clear out any resume signalling related variables
+if port is not in U0 or Resume state
+
+This issue was discovered when a PM thread busylooped, trying to runtime
+suspend the xhci USB 2 roothub on a Dell XPS
+
+Reported-by: Daniel J Blueman <daniel@quora.org>
+Tested-by: Daniel J Blueman <daniel@quora.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/usb/host/xhci-hub.c | 45 ++++++++++++++++++++++++++++++++++++++-----
+ drivers/usb/host/xhci-ring.c | 3 +-
+ 2 files changed, 42 insertions(+), 6 deletions(-)
+
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -616,8 +616,30 @@ static u32 xhci_get_port_status(struct u
+ if ((raw_port_status & PORT_RESET) ||
+ !(raw_port_status & PORT_PE))
+ return 0xffffffff;
+- if (time_after_eq(jiffies,
+- bus_state->resume_done[wIndex])) {
++ /* did port event handler already start resume timing? */
++ if (!bus_state->resume_done[wIndex]) {
++ /* If not, maybe we are in a host initated resume? */
++ if (test_bit(wIndex, &bus_state->resuming_ports)) {
++ /* Host initated resume doesn't time the resume
++ * signalling using resume_done[].
++ * It manually sets RESUME state, sleeps 20ms
++ * and sets U0 state. This should probably be
++ * changed, but not right now.
++ */
++ } else {
++ /* port resume was discovered now and here,
++ * start resume timing
++ */
++ unsigned long timeout = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
++
++ set_bit(wIndex, &bus_state->resuming_ports);
++ bus_state->resume_done[wIndex] = timeout;
++ mod_timer(&hcd->rh_timer, timeout);
++ }
++ /* Has resume been signalled for USB_RESUME_TIME yet? */
++ } else if (time_after_eq(jiffies,
++ bus_state->resume_done[wIndex])) {
+ int time_left;
+
+ xhci_dbg(xhci, "Resume USB2 port %d\n",
+@@ -658,13 +680,24 @@ static u32 xhci_get_port_status(struct u
+ } else {
+ /*
+ * The resume has been signaling for less than
+- * 20ms. Report the port status as SUSPEND,
+- * let the usbcore check port status again
+- * and clear resume signaling later.
++ * USB_RESUME_TIME. Report the port status as SUSPEND,
++ * let the usbcore check port status again and clear
++ * resume signaling later.
+ */
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ }
++ /*
++ * Clear stale usb2 resume signalling variables in case port changed
++ * state during resume signalling. For example on error
++ */
++ if ((bus_state->resume_done[wIndex] ||
++ test_bit(wIndex, &bus_state->resuming_ports)) &&
++ (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
++ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
++ bus_state->resume_done[wIndex] = 0;
++ clear_bit(wIndex, &bus_state->resuming_ports);
++ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
+ && (raw_port_status & PORT_POWER)
+ && (bus_state->suspended_ports & (1 << wIndex))) {
+@@ -995,6 +1028,7 @@ int xhci_hub_control(struct usb_hcd *hcd
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
++ set_bit(wIndex, &bus_state->resuming_ports);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_RESUME);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -1002,6 +1036,7 @@ int xhci_hub_control(struct usb_hcd *hcd
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
++ clear_bit(wIndex, &bus_state->resuming_ports);
+ }
+ bus_state->port_c_suspend |= 1 << wIndex;
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1583,7 +1583,8 @@ static void handle_port_status(struct xh
+ */
+ bogus_port_status = true;
+ goto cleanup;
+- } else {
++ } else if (!test_bit(faked_port_index,
++ &bus_state->resuming_ports)) {
+ xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ bus_state->resume_done[faked_port_index] = jiffies +
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);