--- /dev/null
+From 374d446d25d6271ee615952a3b7f123ba4983c35 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 13 Jan 2017 22:51:08 +0100
+Subject: ARM: 8636/1: Cleanup sanity_check_meminfo
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 374d446d25d6271ee615952a3b7f123ba4983c35 upstream.
+
+The logic for sanity_check_meminfo has become difficult to
+follow. Clean up the code so it's more obvious what the code
+is actually trying to do. Additionally, meminfo is now removed
+so rename the function to better describe its purpose.
+
+Tested-by: Magnus Lilja <lilja.magnus@gmail.com>
+Reviewed-by: Nicolas Pitre <nico@linaro.org>
+Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/setup.c | 4 +-
+ arch/arm/mm/mmu.c | 66 +++++++++++++++++-------------------------------
+ arch/arm/mm/nommu.c | 8 ++---
+ 3 files changed, 30 insertions(+), 48 deletions(-)
+
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
+ extern void init_default_cache_policy(unsigned long);
+ extern void paging_init(const struct machine_desc *desc);
+ extern void early_paging_init(const struct machine_desc *);
+-extern void sanity_check_meminfo(void);
++extern void adjust_lowmem_bounds(void);
+ extern enum reboot_mode reboot_mode;
+ extern void setup_dma_zone(const struct machine_desc *desc);
+
+@@ -1093,7 +1093,7 @@ void __init setup_arch(char **cmdline_p)
+ setup_dma_zone(mdesc);
+ xen_early_init();
+ efi_init();
+- sanity_check_meminfo();
++ adjust_lowmem_bounds();
+ arm_memblock_init(mdesc);
+
+ early_ioremap_reset();
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1152,13 +1152,11 @@ early_param("vmalloc", early_vmalloc);
+
+ phys_addr_t arm_lowmem_limit __initdata = 0;
+
+-void __init sanity_check_meminfo(void)
++void __init adjust_lowmem_bounds(void)
+ {
+ phys_addr_t memblock_limit = 0;
+- int highmem = 0;
+ u64 vmalloc_limit;
+ struct memblock_region *reg;
+- bool should_use_highmem = false;
+
+ /*
+ * Let's use our own (unoptimized) equivalent of __pa() that is
+@@ -1172,43 +1170,18 @@ void __init sanity_check_meminfo(void)
+ for_each_memblock(memory, reg) {
+ phys_addr_t block_start = reg->base;
+ phys_addr_t block_end = reg->base + reg->size;
+- phys_addr_t size_limit = reg->size;
+
+- if (reg->base >= vmalloc_limit)
+- highmem = 1;
+- else
+- size_limit = vmalloc_limit - reg->base;
+-
+-
+- if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+-
+- if (highmem) {
+- pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
+- &block_start, &block_end);
+- memblock_remove(reg->base, reg->size);
+- should_use_highmem = true;
+- continue;
+- }
+-
+- if (reg->size > size_limit) {
+- phys_addr_t overlap_size = reg->size - size_limit;
+-
+- pr_notice("Truncating RAM at %pa-%pa",
+- &block_start, &block_end);
+- block_end = vmalloc_limit;
+- pr_cont(" to -%pa", &block_end);
+- memblock_remove(vmalloc_limit, overlap_size);
+- should_use_highmem = true;
+- }
+- }
+-
+- if (!highmem) {
+- if (block_end > arm_lowmem_limit) {
+- if (reg->size > size_limit)
+- arm_lowmem_limit = vmalloc_limit;
+- else
+- arm_lowmem_limit = block_end;
+- }
++ if (reg->base < vmalloc_limit) {
++ if (block_end > arm_lowmem_limit)
++ /*
++ * Compare as u64 to ensure vmalloc_limit does
++ * not get truncated. block_end should always
++ * fit in phys_addr_t so there should be no
++ * issue with assignment.
++ */
++ arm_lowmem_limit = min_t(u64,
++ vmalloc_limit,
++ block_end);
+
+ /*
+ * Find the first non-pmd-aligned page, and point
+@@ -1233,9 +1206,6 @@ void __init sanity_check_meminfo(void)
+ }
+ }
+
+- if (should_use_highmem)
+- pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+-
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+
+ /*
+@@ -1248,6 +1218,18 @@ void __init sanity_check_meminfo(void)
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
++ if (memblock_end_of_DRAM() > arm_lowmem_limit) {
++ phys_addr_t end = memblock_end_of_DRAM();
++
++ pr_notice("Ignoring RAM at %pa-%pa\n",
++ &memblock_limit, &end);
++ pr_notice("Consider using a HIGHMEM enabled kernel.\n");
++
++ memblock_remove(memblock_limit, end - memblock_limit);
++ }
++ }
++
+ memblock_set_current_limit(memblock_limit);
+ }
+
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -85,7 +85,7 @@ static unsigned long irbar_read(void)
+ }
+
+ /* MPU initialisation functions */
+-void __init sanity_check_meminfo_mpu(void)
++void __init adjust_lowmem_bounds_mpu(void)
+ {
+ phys_addr_t phys_offset = PHYS_OFFSET;
+ phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
+@@ -274,7 +274,7 @@ void __init mpu_setup(void)
+ }
+ }
+ #else
+-static void sanity_check_meminfo_mpu(void) {}
++static void adjust_lowmem_bounds_mpu(void) {}
+ static void __init mpu_setup(void) {}
+ #endif /* CONFIG_ARM_MPU */
+
+@@ -295,10 +295,10 @@ void __init arm_mm_memblock_reserve(void
+ #endif
+ }
+
+-void __init sanity_check_meminfo(void)
++void __init adjust_lowmem_bounds(void)
+ {
+ phys_addr_t end;
+- sanity_check_meminfo_mpu();
++ adjust_lowmem_bounds_mpu();
+ end = memblock_end_of_DRAM();
+ high_memory = __va(end - 1) + 1;
+ memblock_set_current_limit(end);
--- /dev/null
+From 985626564eedc470ce2866e53938303368ad41b7 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 13 Jan 2017 22:51:45 +0100
+Subject: ARM: 8637/1: Adjust memory boundaries after reservations
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 985626564eedc470ce2866e53938303368ad41b7 upstream.
+
+adjust_lowmem_bounds is responsible for setting up the boundary for
+lowmem/highmem. This needs to be setup before memblock reservations can
+occur. At the time memblock reservations can occur, memory can also be
+removed from the system. The lowmem/highmem boundary and end of memory
+may be affected by this but it is currently not recalculated. On some
+systems this may be harmless, on others this may result in incorrect
+ranges being passed to the main memory allocator. Correct this by
+recalculating the lowmem/highmem boundary after all reservations have
+been made.
+
+Tested-by: Magnus Lilja <lilja.magnus@gmail.com>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/setup.c | 6 ++++++
+ arch/arm/mm/mmu.c | 9 ++++++---
+ 2 files changed, 12 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
+ setup_dma_zone(mdesc);
+ xen_early_init();
+ efi_init();
++ /*
++ * Make sure the calculation for lowmem/highmem is set appropriately
++	 * before reserving/allocating any memory
++ */
+ adjust_lowmem_bounds();
+ arm_memblock_init(mdesc);
++ /* Memory may have been removed so recalculate the bounds. */
++ adjust_lowmem_bounds();
+
+ early_ioremap_reset();
+
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1157,6 +1157,7 @@ void __init adjust_lowmem_bounds(void)
+ phys_addr_t memblock_limit = 0;
+ u64 vmalloc_limit;
+ struct memblock_region *reg;
++ phys_addr_t lowmem_limit = 0;
+
+ /*
+ * Let's use our own (unoptimized) equivalent of __pa() that is
+@@ -1172,14 +1173,14 @@ void __init adjust_lowmem_bounds(void)
+ phys_addr_t block_end = reg->base + reg->size;
+
+ if (reg->base < vmalloc_limit) {
+- if (block_end > arm_lowmem_limit)
++ if (block_end > lowmem_limit)
+ /*
+ * Compare as u64 to ensure vmalloc_limit does
+ * not get truncated. block_end should always
+ * fit in phys_addr_t so there should be no
+ * issue with assignment.
+ */
+- arm_lowmem_limit = min_t(u64,
++ lowmem_limit = min_t(u64,
+ vmalloc_limit,
+ block_end);
+
+@@ -1200,12 +1201,14 @@ void __init adjust_lowmem_bounds(void)
+ if (!IS_ALIGNED(block_start, PMD_SIZE))
+ memblock_limit = block_start;
+ else if (!IS_ALIGNED(block_end, PMD_SIZE))
+- memblock_limit = arm_lowmem_limit;
++ memblock_limit = lowmem_limit;
+ }
+
+ }
+ }
+
++ arm_lowmem_limit = lowmem_limit;
++
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+
+ /*
--- /dev/null
+From kristina.martsenko@arm.com Mon Jun 12 15:05:37 2017
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Tue, 6 Jun 2017 20:14:10 +0100
+Subject: arm64: entry: improve data abort handling of tagged pointers
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>
+Message-ID: <1496776450-8731-3-git-send-email-kristina.martsenko@arm.com>
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 276e93279a630657fff4b086ba14c95955912dfa upstream.
+
+This backport has a minor difference from the upstream commit: it adds
+the asm-uaccess.h file, which is not present in 4.9, because 4.9 does
+not have commit b4b8664d291a ("arm64: don't pull uaccess.h into *.S").
+
+Original patch description:
+
+When handling a data abort from EL0, we currently zero the top byte of
+the faulting address, as we assume the address is a TTBR0 address, which
+may contain a non-zero address tag. However, the address may be a TTBR1
+address, in which case we should not zero the top byte. This patch fixes
+that. The effect is that the full TTBR1 address is passed to the task's
+signal handler (or printed out in the kernel log).
+
+When handling a data abort from EL1, we leave the faulting address
+intact, as we assume it's either a TTBR1 address or a TTBR0 address with
+tag 0x00. This is true as far as I'm aware, we don't seem to access a
+tagged TTBR0 address anywhere in the kernel. Regardless, it's easy to
+forget about address tags, and code added in the future may not always
+remember to remove tags from addresses before accessing them. So add tag
+handling to the EL1 data abort handler as well. This also makes it
+consistent with the EL0 data abort handler.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/asm-uaccess.h | 13 +++++++++++++
+ arch/arm64/kernel/entry.S | 6 ++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+ create mode 100644 arch/arm64/include/asm/asm-uaccess.h
+
+--- /dev/null
++++ b/arch/arm64/include/asm/asm-uaccess.h
+@@ -0,0 +1,13 @@
++#ifndef __ASM_ASM_UACCESS_H
++#define __ASM_ASM_UACCESS_H
++
++/*
++ * Remove the address tag from a virtual address, if present.
++ */
++ .macro clear_address_tag, dst, addr
++ tst \addr, #(1 << 55)
++ bic \dst, \addr, #(0xff << 56)
++ csel \dst, \dst, \addr, eq
++ .endm
++
++#endif
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -30,6 +30,7 @@
+ #include <asm/irq.h>
+ #include <asm/memory.h>
+ #include <asm/thread_info.h>
++#include <asm/asm-uaccess.h>
+ #include <asm/unistd.h>
+
+ /*
+@@ -369,12 +370,13 @@ el1_da:
+ /*
+ * Data abort handling
+ */
+- mrs x0, far_el1
++ mrs x3, far_el1
+ enable_dbg
+ // re-enable interrupts if they were enabled in the aborted context
+ tbnz x23, #7, 1f // PSR_I_BIT
+ enable_irq
+ 1:
++ clear_address_tag x0, x3
+ mov x2, sp // struct pt_regs
+ bl do_mem_abort
+
+@@ -535,7 +537,7 @@ el0_da:
+ // enable interrupts before calling the main handler
+ enable_dbg_and_irq
+ ct_user_exit
+- bic x0, x26, #(0xff << 56)
++ clear_address_tag x0, x26
+ mov x1, x25
+ mov x2, sp
+ bl do_mem_abort
--- /dev/null
+From kristina.martsenko@arm.com Mon Jun 12 15:05:21 2017
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Tue, 6 Jun 2017 20:14:09 +0100
+Subject: arm64: hw_breakpoint: fix watchpoint matching for tagged pointers
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>
+Message-ID: <1496776450-8731-2-git-send-email-kristina.martsenko@arm.com>
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 7dcd9dd8cebe9fa626af7e2358d03a37041a70fb upstream.
+
+This backport has a small difference from the upstream commit:
+ - The address tag is removed in watchpoint_handler() instead of
+ get_distance_from_watchpoint(), because 4.9 does not have commit
+ fdfeff0f9e3d ("arm64: hw_breakpoint: Handle inexact watchpoint
+ addresses").
+
+Original patch description:
+
+When we take a watchpoint exception, the address that triggered the
+watchpoint is found in FAR_EL1. We compare it to the address of each
+configured watchpoint to see which one was hit.
+
+The configured watchpoint addresses are untagged, while the address in
+FAR_EL1 will have an address tag if the data access was done using a
+tagged address. The tag needs to be removed to compare the address to
+the watchpoints.
+
+Currently we don't remove it, and as a result can report the wrong
+watchpoint as being hit (specifically, always either the highest TTBR0
+watchpoint or lowest TTBR1 watchpoint). This patch removes the tag.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h | 6 +++---
+ arch/arm64/kernel/hw_breakpoint.c | 3 ++-
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -105,9 +105,9 @@ static inline void set_fs(mm_segment_t f
+ })
+
+ /*
+- * When dealing with data aborts or instruction traps we may end up with
+- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+- * on to access_ok(), for instance.
++ * When dealing with data aborts, watchpoints, or instruction traps we may end
++ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
++ * pass on to access_ok(), for instance.
+ */
+ #define untagged_addr(addr) sign_extend64(addr, 55)
+
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -36,6 +36,7 @@
+ #include <asm/traps.h>
+ #include <asm/cputype.h>
+ #include <asm/system_misc.h>
++#include <asm/uaccess.h>
+
+ /* Breakpoint currently in use for each BRP. */
+ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+@@ -696,7 +697,7 @@ static int watchpoint_handler(unsigned l
+
+ /* Check if the watchpoint value matches. */
+ val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+- if (val != (addr & ~alignment_mask))
++ if (val != (untagged_addr(addr) & ~alignment_mask))
+ goto unlock;
+
+ /* Possible match, check the byte address select to confirm. */
--- /dev/null
+From kristina.martsenko@arm.com Mon Jun 12 15:04:11 2017
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Tue, 6 Jun 2017 20:14:08 +0100
+Subject: arm64: traps: fix userspace cache maintenance emulation on a tagged pointer
+To: stable@vger.kernel.org
+Cc: Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>
+Message-ID: <1496776450-8731-1-git-send-email-kristina.martsenko@arm.com>
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit 81cddd65b5c82758ea5571a25e31ff6f1f89ff02 upstream.
+
+This backport has a minor difference from the upstream commit, as v4.9
+did not yet have the refactoring done by commit 8b6e70fccff2 ("arm64:
+traps: correctly handle MRS/MSR with XZR").
+
+Original patch description:
+
+When we emulate userspace cache maintenance in the kernel, we can
+currently send the task a SIGSEGV even though the maintenance was done
+on a valid address. This happens if the address has a non-zero address
+tag, and happens to not be mapped in.
+
+When we get the address from a user register, we don't currently remove
+the address tag before performing cache maintenance on it. If the
+maintenance faults, we end up in either __do_page_fault, where find_vma
+can't find the VMA if the address has a tag, or in do_translation_fault,
+where the tagged address will appear to be above TASK_SIZE. In both
+cases, the address is not mapped in, and the task is sent a SIGSEGV.
+
+This patch removes the tag from the address before using it. With this
+patch, the fault is handled correctly, the address gets mapped in, and
+the cache maintenance succeeds.
+
+As a second bug, if cache maintenance (correctly) fails on an invalid
+tagged address, the address gets passed into arm64_notify_segfault,
+where find_vma fails to find the VMA due to the tag, and the wrong
+si_code may be sent as part of the siginfo_t of the segfault. With this
+patch, the correct si_code is sent.
+
+Fixes: 7dd01aef0557 ("arm64: trap userspace "dc cvau" cache operation on errata-affected core")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/traps.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -435,7 +435,7 @@ int cpu_enable_cache_maint_trap(void *__
+ }
+
+ #define __user_cache_maint(insn, address, res) \
+- if (untagged_addr(address) >= user_addr_max()) \
++ if (address >= user_addr_max()) \
+ res = -EFAULT; \
+ else \
+ asm volatile ( \
+@@ -458,7 +458,7 @@ static void user_cache_maint_handler(uns
+ int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
+ int ret = 0;
+
+- address = (rt == 31) ? 0 : regs->regs[rt];
++ address = (rt == 31) ? 0 : untagged_addr(regs->regs[rt]);
+
+ switch (crm) {
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
--- /dev/null
+From 4296f23ed49a15d36949458adcc66ff993dee2a8 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Sun, 19 Mar 2017 14:30:02 +0100
+Subject: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 4296f23ed49a15d36949458adcc66ff993dee2a8 upstream.
+
+sugov_start() only initializes struct sugov_cpu per-CPU structures
+for shared policies, but it should do that for single-CPU policies too.
+
+That in particular makes the IO-wait boost mechanism work in the
+cases when cpufreq policies correspond to individual CPUs.
+
+Fixes: 21ca6d2c52f8 (cpufreq: schedutil: Add iowait boosting)
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/cpufreq_schedutil.c | 20 +++++++-------------
+ 1 file changed, 7 insertions(+), 13 deletions(-)
+
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -507,20 +507,14 @@ static int sugov_start(struct cpufreq_po
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
++ memset(sg_cpu, 0, sizeof(*sg_cpu));
+ sg_cpu->sg_policy = sg_policy;
+- if (policy_is_shared(policy)) {
+- sg_cpu->util = 0;
+- sg_cpu->max = 0;
+- sg_cpu->flags = SCHED_CPUFREQ_RT;
+- sg_cpu->last_update = 0;
+- sg_cpu->iowait_boost = 0;
+- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+- sugov_update_shared);
+- } else {
+- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+- sugov_update_single);
+- }
++ sg_cpu->flags = SCHED_CPUFREQ_RT;
++ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
++ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
++ policy_is_shared(policy) ?
++ sugov_update_shared :
++ sugov_update_single);
+ }
+ return 0;
+ }
--- /dev/null
+From 6c4f0fa643cb9e775dcc976e3db00d649468ff1d Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Thu, 2 Mar 2017 14:03:20 +0530
+Subject: cpufreq: schedutil: move cached_raw_freq to struct sugov_policy
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 6c4f0fa643cb9e775dcc976e3db00d649468ff1d upstream.
+
+cached_raw_freq applies to the entire cpufreq policy and not individual
+CPUs. Apart from wasting per-cpu memory, it is actually wrong to keep it
+in struct sugov_cpu as we may end up comparing next_freq with a stale
+cached_raw_freq of a random CPU.
+
+Move cached_raw_freq to struct sugov_policy.
+
+Fixes: 5cbea46984d6 (cpufreq: schedutil: map raw required frequency to driver frequency)
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/cpufreq_schedutil.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -32,6 +32,7 @@ struct sugov_policy {
+ u64 last_freq_update_time;
+ s64 freq_update_delay_ns;
+ unsigned int next_freq;
++ unsigned int cached_raw_freq;
+
+ /* The next fields are only needed if fast switch cannot be used. */
+ struct irq_work irq_work;
+@@ -46,7 +47,6 @@ struct sugov_cpu {
+ struct update_util_data update_util;
+ struct sugov_policy *sg_policy;
+
+- unsigned int cached_raw_freq;
+ unsigned long iowait_boost;
+ unsigned long iowait_boost_max;
+ u64 last_update;
+@@ -140,9 +140,9 @@ static unsigned int get_next_freq(struct
+
+ freq = (freq + (freq >> 2)) * util / max;
+
+- if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
++ if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ return sg_policy->next_freq;
+- sg_cpu->cached_raw_freq = freq;
++ sg_policy->cached_raw_freq = freq;
+ return cpufreq_driver_resolve_freq(policy, freq);
+ }
+
+@@ -502,6 +502,7 @@ static int sugov_start(struct cpufreq_po
+ sg_policy->next_freq = UINT_MAX;
+ sg_policy->work_in_progress = false;
+ sg_policy->need_freq_update = false;
++ sg_policy->cached_raw_freq = 0;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+@@ -512,7 +513,6 @@ static int sugov_start(struct cpufreq_po
+ sg_cpu->max = 0;
+ sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->last_update = 0;
+- sg_cpu->cached_raw_freq = 0;
+ sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
--- /dev/null
+From 665788572c6410b7efadc2e3009c5d830b6d8ef9 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Fri, 10 Mar 2017 15:27:57 +0200
+Subject: drm/i915/vbt: don't propagate errors from intel_bios_init()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 665788572c6410b7efadc2e3009c5d830b6d8ef9 upstream.
+
+We don't use the error return for anything other than reporting and
+logging that there is no VBT. We can pull the logging in the function,
+and remove the error status return. Moreover, if we needed the
+information for something later on, we'd probably be better off storing
+the bit in dev_priv, and using it where it's needed, instead of using
+the error return.
+
+While at it, improve the comments.
+
+Cc: Manasi Navare <manasi.d.navare@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/438ebbb0d5f0d321c625065b9cc78532a1dab24f.1489152288.git.jani.nikula@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_drv.c | 4 +---
+ drivers/gpu/drm/i915/i915_drv.h | 2 +-
+ drivers/gpu/drm/i915/intel_bios.c | 31 ++++++++++++++++---------------
+ 3 files changed, 18 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -573,9 +573,7 @@ static int i915_load_modeset_init(struct
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+- ret = intel_bios_init(dev_priv);
+- if (ret)
+- DRM_INFO("failed to find VBIOS tables\n");
++ intel_bios_init(dev_priv);
+
+ /* If we have > 1 VGA cards, then we need to arbitrate access
+ * to the common VGA resources.
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3584,7 +3584,7 @@ static inline bool intel_gmbus_is_forced
+ extern void intel_i2c_reset(struct drm_device *dev);
+
+ /* intel_bios.c */
+-int intel_bios_init(struct drm_i915_private *dev_priv);
++void intel_bios_init(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1450,36 +1450,35 @@ static const struct vbt_header *find_vbt
+ * intel_bios_init - find VBT and initialize settings from the BIOS
+ * @dev_priv: i915 device instance
+ *
+- * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+- * to appropriate values.
+- *
+- * Returns 0 on success, nonzero on failure.
++ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
++ * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
++ * initialize some defaults if the VBT is not present at all.
+ */
+-int
+-intel_bios_init(struct drm_i915_private *dev_priv)
++void intel_bios_init(struct drm_i915_private *dev_priv)
+ {
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ const struct bdb_header *bdb;
+ u8 __iomem *bios = NULL;
+
+- if (HAS_PCH_NOP(dev_priv))
+- return -ENODEV;
++ if (HAS_PCH_NOP(dev_priv)) {
++ DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
++ return;
++ }
+
+ init_vbt_defaults(dev_priv);
+
++ /* If the OpRegion does not have VBT, look in PCI ROM. */
+ if (!vbt) {
+ size_t size;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+- return -1;
++ goto out;
+
+ vbt = find_vbt(bios, size);
+- if (!vbt) {
+- pci_unmap_rom(pdev, bios);
+- return -1;
+- }
++ if (!vbt)
++ goto out;
+
+ DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+ }
+@@ -1504,10 +1503,12 @@ intel_bios_init(struct drm_i915_private
+ parse_mipi_sequence(dev_priv, bdb);
+ parse_ddi_ports(dev_priv, bdb);
+
++out:
++ if (!vbt)
++ DRM_INFO("Failed to find VBIOS tables (VBT)\n");
++
+ if (bios)
+ pci_unmap_rom(pdev, bios);
+-
+- return 0;
+ }
+
+ /**
--- /dev/null
+From bb1d132935c2f87cd261eb559759fe49d5e5dc43 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Fri, 10 Mar 2017 15:27:58 +0200
+Subject: drm/i915/vbt: split out defaults that are set when there is no VBT
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit bb1d132935c2f87cd261eb559759fe49d5e5dc43 upstream.
+
+The main thing are the DDI ports. If there's a VBT that says there are
+no outputs, we should trust that, and not have semi-random
+defaults. Unfortunately, the defaults have resulted in some Chromebooks
+without VBT to rely on this behaviour, so we split out the defaults for
+the missing VBT case.
+
+Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
+Cc: Manasi Navare <manasi.d.navare@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/95c26079ff640d43f53b944f17e9fc356b36daec.1489152288.git.jani.nikula@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_bios.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1332,6 +1332,7 @@ parse_device_mapping(struct drm_i915_pri
+ return;
+ }
+
++/* Common defaults which may be overridden by VBT. */
+ static void
+ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ {
+@@ -1368,6 +1369,18 @@ init_vbt_defaults(struct drm_i915_privat
+ &dev_priv->vbt.ddi_port_info[port];
+
+ info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
++ }
++}
++
++/* Defaults to initialize only if there is no VBT. */
++static void
++init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
++{
++ enum port port;
++
++ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
++ struct ddi_vbt_port_info *info =
++ &dev_priv->vbt.ddi_port_info[port];
+
+ info->supports_dvi = (port != PORT_A && port != PORT_E);
+ info->supports_hdmi = info->supports_dvi;
+@@ -1504,8 +1517,10 @@ void intel_bios_init(struct drm_i915_pri
+ parse_ddi_ports(dev_priv, bdb);
+
+ out:
+- if (!vbt)
++ if (!vbt) {
+ DRM_INFO("Failed to find VBIOS tables (VBT)\n");
++ init_vbt_missing_defaults(dev_priv);
++ }
+
+ if (bios)
+ pci_unmap_rom(pdev, bios);
cpu-hotplug-drop-the-device-lock-on-error.patch
drivers-char-mem-fix-wraparound-check-to-allow-mappings-up-to-the-end.patch
serial-sh-sci-fix-panic-when-serial-console-and-dma-are-enabled.patch
+arm64-traps-fix-userspace-cache-maintenance-emulation-on-a-tagged-pointer.patch
+arm64-hw_breakpoint-fix-watchpoint-matching-for-tagged-pointers.patch
+arm64-entry-improve-data-abort-handling-of-tagged-pointers.patch
+arm-8636-1-cleanup-sanity_check_meminfo.patch
+arm-8637-1-adjust-memory-boundaries-after-reservations.patch
+tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
+usercopy-adjust-tests-to-deal-with-smap-pan.patch
+drm-i915-vbt-don-t-propagate-errors-from-intel_bios_init.patch
+drm-i915-vbt-split-out-defaults-that-are-set-when-there-is-no-vbt.patch
+cpufreq-schedutil-move-cached_raw_freq-to-struct-sugov_policy.patch
+cpufreq-schedutil-fix-per-cpu-structure-initialization-in-sugov_start.patch
--- /dev/null
+From e09e28671cda63e6308b31798b997639120e2a21 Mon Sep 17 00:00:00 2001
+From: Amey Telawane <ameyt@codeaurora.org>
+Date: Wed, 3 May 2017 15:41:14 +0530
+Subject: tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
+
+From: Amey Telawane <ameyt@codeaurora.org>
+
+commit e09e28671cda63e6308b31798b997639120e2a21 upstream.
+
+Strcpy is inherently not safe, and strlcpy() should be used instead.
+__trace_find_cmdline() uses strcpy() because the comms saved must have a
+terminating nul character, but it doesn't hurt to add the extra protection
+of using strlcpy() instead of strcpy().
+
+Link: http://lkml.kernel.org/r/1493806274-13936-1-git-send-email-amit.pundir@linaro.org
+
+Signed-off-by: Amey Telawane <ameyt@codeaurora.org>
+[AmitP: Cherry-picked this commit from CodeAurora kernel/msm-3.10
+https://source.codeaurora.org/quic/la/kernel/msm-3.10/commit/?id=2161ae9a70b12cf18ac8e5952a20161ffbccb477]
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+[ Updated change log and removed the "- 1" from len parameter ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1862,7 +1862,7 @@ static void __trace_find_cmdline(int pid
+
+ map = savedcmd->map_pid_to_cmdline[pid];
+ if (map != NO_CMDLINE_MAP)
+- strcpy(comm, get_saved_cmdlines(map));
++ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ else
+ strcpy(comm, "<...>");
+ }
--- /dev/null
+From f5f893c57e37ca730808cb2eee3820abd05e7507 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Mon, 13 Feb 2017 11:25:26 -0800
+Subject: usercopy: Adjust tests to deal with SMAP/PAN
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f5f893c57e37ca730808cb2eee3820abd05e7507 upstream.
+
+Under SMAP/PAN/etc, we cannot write directly to userspace memory, so
+this rearranges the test bytes to get written through copy_to_user().
+Additionally drops the bad copy_from_user() test that would trigger a
+memcpy() against userspace on failure.
+
+[arnd: the test module was added in 3.14, and this backported patch
+ should apply cleanly on all version from 3.14 to 4.10.
+ The original patch was in 4.11 on top of a context change
+ I saw the bug triggered with kselftest on a 4.4.y stable kernel]
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_user_copy.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/lib/test_user_copy.c
++++ b/lib/test_user_copy.c
+@@ -58,7 +58,9 @@ static int __init test_user_copy_init(vo
+ usermem = (char __user *)user_addr;
+ bad_usermem = (char *)user_addr;
+
+- /* Legitimate usage: none of these should fail. */
++ /*
++ * Legitimate usage: none of these copies should fail.
++ */
+ ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
+@@ -68,19 +70,33 @@ static int __init test_user_copy_init(vo
+ ret |= test(put_user(value, (unsigned long __user *)usermem),
+ "legitimate put_user failed");
+
+- /* Invalid usage: none of these should succeed. */
++ /*
++ * Invalid usage: none of these copies should succeed.
++ */
++
++ /* Reject kernel-to-kernel copies through copy_from_user(). */
+ ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+ PAGE_SIZE),
+ "illegal all-kernel copy_from_user passed");
++
++#if 0
++ /*
++ * When running with SMAP/PAN/etc, this will Oops the kernel
++ * due to the zeroing of userspace memory on failure. This needs
++ * to be tested in LKDTM instead, since this test module does not
++ * expect to explode.
++ */
+ ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
+ PAGE_SIZE),
+ "illegal reversed copy_from_user passed");
++#endif
+ ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+ PAGE_SIZE),
+ "illegal all-kernel copy_to_user passed");
+ ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
+ PAGE_SIZE),
+ "illegal reversed copy_to_user passed");
++
+ ret |= test(!get_user(value, (unsigned long __user *)kmem),
+ "illegal get_user passed");
+ ret |= test(!put_user(value, (unsigned long __user *)kmem),