--- /dev/null
+From f9f4872df6e1801572949f8a370c886122d4b6da Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Sat, 8 Oct 2016 12:42:38 -0700
+Subject: cpufreq: intel_pstate: Fix unsafe HWP MSR access
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit f9f4872df6e1801572949f8a370c886122d4b6da upstream.
+
+It is a requirement that the MSR_PM_ENABLE MSR be set to 0x01 before
+reading MSR_HWP_CAPABILITIES on a given CPU. If cpufreq init() is
+scheduled on a CPU which is not the same as policy->cpu, or migrates to
+a different CPU before the MSR read of MSR_HWP_CAPABILITIES, it is
+possible that MSR_PM_ENABLE was not yet set to 0x01 on that CPU. This
+will cause a GP fault. So, like other places in this path,
+rdmsrl_on_cpu() should be used instead of rdmsrl().
+
+Moreover, the scope of MSR_HWP_CAPABILITIES is per-thread, so it should
+be read on the same CPU for which MSR_HWP_REQUEST is being set.
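+
+A minimal sketch of the resulting safe pattern (illustrative only,
+condensed from the diff below):
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		/* Read the per-thread capabilities on the target CPU
+		 * itself, where MSR_PM_ENABLE is already known to be 0x01.
+		 */
+		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+		/* ... clamp the request into the HWP_LOWEST_PERF(cap) ..
+		 * HWP_HIGHEST_PERF(cap) range, then write it back:
+		 */
+		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+	}
+	put_online_cpus();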
+
+dmesg dump or warning:
+
+[ 22.014488] WARNING: CPU: 139 PID: 1 at arch/x86/mm/extable.c:50 ex_handler_rdmsr_unsafe+0x68/0x70
+[ 22.014492] unchecked MSR access error: RDMSR from 0x771
+[ 22.014493] Modules linked in:
+[ 22.014507] CPU: 139 PID: 1 Comm: swapper/0 Not tainted 4.7.5+ #1
+...
+...
+[ 22.014516] Call Trace:
+[ 22.014542] [<ffffffff813d7dd1>] dump_stack+0x63/0x82
+[ 22.014558] [<ffffffff8107bc8b>] __warn+0xcb/0xf0
+[ 22.014561] [<ffffffff8107bcff>] warn_slowpath_fmt+0x4f/0x60
+[ 22.014563] [<ffffffff810676f8>] ex_handler_rdmsr_unsafe+0x68/0x70
+[ 22.014564] [<ffffffff810677d9>] fixup_exception+0x39/0x50
+[ 22.014604] [<ffffffff8102e400>] do_general_protection+0x80/0x150
+[ 22.014610] [<ffffffff817f9ec8>] general_protection+0x28/0x30
+[ 22.014635] [<ffffffff81687940>] ? get_target_pstate_use_performance+0xb0/0xb0
+[ 22.014642] [<ffffffff810600c7>] ? native_read_msr+0x7/0x40
+[ 22.014657] [<ffffffff81688123>] intel_pstate_hwp_set+0x23/0x130
+[ 22.014660] [<ffffffff81688406>] intel_pstate_set_policy+0x1b6/0x340
+[ 22.014662] [<ffffffff816829bb>] cpufreq_set_policy+0xeb/0x2c0
+[ 22.014664] [<ffffffff81682f39>] cpufreq_init_policy+0x79/0xe0
+[ 22.014666] [<ffffffff81682cb0>] ? cpufreq_update_policy+0x120/0x120
+[ 22.014669] [<ffffffff816833a6>] cpufreq_online+0x406/0x820
+[ 22.014671] [<ffffffff8168381f>] cpufreq_add_dev+0x5f/0x90
+[ 22.014717] [<ffffffff81530ac8>] subsys_interface_register+0xb8/0x100
+[ 22.014719] [<ffffffff816821bc>] cpufreq_register_driver+0x14c/0x210
+[ 22.014749] [<ffffffff81fe1d90>] intel_pstate_init+0x39d/0x4d5
+[ 22.014751] [<ffffffff81fe13f2>] ? cpufreq_gov_dbs_init+0x12/0x12
+
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -285,14 +285,14 @@ static void intel_pstate_hwp_set(void)
+ int min, hw_min, max, hw_max, cpu, range, adj_range;
+ u64 value, cap;
+
+- rdmsrl(MSR_HWP_CAPABILITIES, cap);
+- hw_min = HWP_LOWEST_PERF(cap);
+- hw_max = HWP_HIGHEST_PERF(cap);
+- range = hw_max - hw_min;
+-
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
++ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
++ hw_min = HWP_LOWEST_PERF(cap);
++ hw_max = HWP_HIGHEST_PERF(cap);
++ range = hw_max - hw_min;
++
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ adj_range = limits->min_perf_pct * range / 100;
+ min = hw_min + adj_range;
--- /dev/null
+From f8850abb7ba68229838014b3409460e576751c6d Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sun, 9 Oct 2016 11:12:34 +0200
+Subject: parisc: Fix kernel memory layout regarding position of __gp
+
+From: Helge Deller <deller@gmx.de>
+
+commit f8850abb7ba68229838014b3409460e576751c6d upstream.
+
+Architecturally we need to keep __gp below 0x1000000.
+
+But because of ftrace and tracepoint support, RO_DATA_SECTION now gets
+much bigger than it was before. By moving the linkage tables in front of
+RO_DATA_SECTION we can avoid __gp being positioned at too high an
+address.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/vmlinux.lds.S | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -88,8 +88,9 @@ SECTIONS
+ /* Start of data section */
+ _sdata = .;
+
+- RO_DATA_SECTION(8)
+-
++ /* Architecturally we need to keep __gp below 0x1000000 and thus
++ * in front of RO_DATA_SECTION() which stores lots of tracepoint
++ * and ftrace symbols. */
+ #ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+@@ -104,6 +105,8 @@ SECTIONS
+ }
+ #endif
+
++ RO_DATA_SECTION(8)
++
+ /* unwind info */
+ .PARISC.unwind : {
+ __start___unwind = .;
--- /dev/null
+From 65bf34f59594c11f13d371c5334a6a0a385cd7ae Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sun, 9 Oct 2016 09:57:54 +0200
+Subject: parisc: Increase initial kernel mapping size
+
+From: Helge Deller <deller@gmx.de>
+
+commit 65bf34f59594c11f13d371c5334a6a0a385cd7ae upstream.
+
+Increase the initial kernel default page mapping size for 64-bit kernels to
+64 MB and for 32-bit kernels to 32 MB.
+
+Due to the added support for ftrace, tracepoints and huge pages, the
+kernel size can exceed the sizes we used up to now.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgtable.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -83,10 +83,10 @@ static inline void purge_tlb_entries(str
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+ /* This is the size of the initially mapped kernel memory */
+-#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
+-#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
++#if defined(CONFIG_64BIT)
++#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
+ #else
+-#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
++#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+ #endif
+ #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
+
--- /dev/null
+From 690d097c00c88fa9d93d198591e184164b1d8c20 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 7 Oct 2016 18:19:55 +0200
+Subject: parisc: Increase KERNEL_INITIAL_SIZE for 32-bit SMP kernels
+
+From: Helge Deller <deller@gmx.de>
+
+commit 690d097c00c88fa9d93d198591e184164b1d8c20 upstream.
+
+Increase the initial kernel default page mapping size for SMP kernels to 32MB
+and add a runtime check which panics early if the kernel is bigger than the
+initial mapping size.
+
+This fixes boot crashes of 32-bit SMP kernels. Due to the introduction
+of huge page support in kernel 4.4 and its required initial kernel
+layout in memory, a 32-bit SMP kernel usually grew bigger (in layout,
+not in size) than 16MB.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgtable.h | 2 +-
+ arch/parisc/kernel/setup.c | 8 ++++++++
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -83,7 +83,7 @@ static inline void purge_tlb_entries(str
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+ /* This is the size of the initially mapped kernel memory */
+-#ifdef CONFIG_64BIT
++#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
+ #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+ #else
+ #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -38,6 +38,7 @@
+ #include <linux/export.h>
+
+ #include <asm/processor.h>
++#include <asm/sections.h>
+ #include <asm/pdc.h>
+ #include <asm/led.h>
+ #include <asm/machdep.h> /* for pa7300lc_init() proto */
+@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ printk(KERN_CONT ".\n");
+
++ /*
++ * Check if initial kernel page mappings are sufficient.
++ * panic early if not, else we may access kernel functions
++ * and variables which can't be reached.
++ */
++ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
++ panic("KERNEL_INITIAL_ORDER too small!");
+
+ pdc_console_init();
+
--- /dev/null
+From d5a9bf0b38d2ac85c9a693c7fb851f74fd2a2494 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 8 Sep 2016 13:48:06 +0200
+Subject: pstore/core: drop cmpxchg based updates
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit d5a9bf0b38d2ac85c9a693c7fb851f74fd2a2494 upstream.
+
+I have here an FPGA behind PCIe which exports SRAM which I use for
+pstore. It seems that the FPGA no longer supports cmpxchg-based updates:
+it writes back 0xff…ff and returns the same. This leads to a crash
+during crash handling, rendering pstore useless.
+
+Since I doubt that there is much benefit from using cmpxchg() here, I am
+dropping this atomic access and using the spinlock-based version
+instead.
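+
+For reference, a sketch of the surviving spinlock-based start-pointer
+update (the full body already lives in ram_core.c; only the rename
+shows up in the diff below):
+
+	static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+	{
+		int old, new;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&buffer_lock, flags);
+		old = atomic_read(&prz->buffer->start);
+		new = old + a;
+		/* Wrap the start pointer around the ring buffer. */
+		while (unlikely(new >= prz->buffer_size))
+			new -= prz->buffer_size;
+		atomic_set(&prz->buffer->start, new);
+		raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+		return old;
+	}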
+
+Cc: Anton Vorontsov <anton@enomsg.org>
+Cc: Colin Cross <ccross@android.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Rabin Vincent <rabinv@axis.com>
+Tested-by: Rabin Vincent <rabinv@axis.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+[kees: remove "_locked" suffix since it's the only option now]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 43 ++-----------------------------------------
+ 1 file changed, 2 insertions(+), 41 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -47,43 +47,10 @@ static inline size_t buffer_start(struct
+ return atomic_read(&prz->buffer->start);
+ }
+
+-/* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- int old;
+- int new;
+-
+- do {
+- old = atomic_read(&prz->buffer->start);
+- new = old + a;
+- while (unlikely(new >= prz->buffer_size))
+- new -= prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+-
+- return old;
+-}
+-
+-/* increase the size counter until it hits the max size */
+-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- size_t old;
+- size_t new;
+-
+- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+- return;
+-
+- do {
+- old = atomic_read(&prz->buffer->size);
+- new = old + a;
+- if (new > prz->buffer_size)
+- new = prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+-}
+-
+ static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+ /* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ int old;
+ int new;
+@@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(st
+ }
+
+ /* increase the size counter until it hits the max size */
+-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ size_t old;
+ size_t new;
+@@ -124,9 +91,6 @@ exit:
+ raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+
+-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+-
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+ {
+@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_a
+ return NULL;
+ }
+
+- buffer_start_add = buffer_start_add_locked;
+- buffer_size_add = buffer_size_add_locked;
+-
+ if (memtype)
+ va = ioremap(start, size);
+ else
--- /dev/null
+From d771fdf94180de2bd811ac90cba75f0f346abf8d Mon Sep 17 00:00:00 2001
+From: Andrew Bresticker <abrestic@chromium.org>
+Date: Mon, 15 Feb 2016 09:19:49 +0100
+Subject: pstore/ram: Use memcpy_fromio() to save old buffer
+
+From: Andrew Bresticker <abrestic@chromium.org>
+
+commit d771fdf94180de2bd811ac90cba75f0f346abf8d upstream.
+
+The ramoops buffer may be mapped as either I/O memory or uncached
+memory. On ARM64, this results in a device-type (strongly-ordered)
+mapping. Since unaligned accesses to device-type memory will
+generate an alignment fault (regardless of whether or not strict
+alignment checking is enabled), it is not safe to use memcpy().
+memcpy_fromio() is guaranteed to only use aligned accesses, so use
+that instead.
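+
+A generic usage sketch (hypothetical names, not part of this patch):
+
+	void __iomem *va = ioremap(phys_addr, len);
+
+	if (va) {
+		/* memcpy_fromio() issues only aligned reads, which is safe
+		 * on a device-type mapping; a plain memcpy() is not.
+		 */
+		memcpy_fromio(local_buf, va, len);
+		iounmap(va);
+	}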
+
+Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
+Signed-off-by: Enric Balletbo Serra <enric.balletbo@collabora.com>
+Reviewed-by: Puneet Kumar <puneetster@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -286,8 +286,8 @@ void persistent_ram_save_old(struct pers
+ }
+
+ prz->old_log_size = size;
+- memcpy(prz->old_log, &buffer->data[start], size - start);
+- memcpy(prz->old_log + size - start, &buffer->data[0], start);
++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
+ }
+
+ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
--- /dev/null
+From 7e75678d23167c2527e655658a8ef36a36c8b4d9 Mon Sep 17 00:00:00 2001
+From: Furquan Shaikh <furquan@google.com>
+Date: Mon, 15 Feb 2016 09:19:48 +0100
+Subject: pstore/ram: Use memcpy_toio instead of memcpy
+
+From: Furquan Shaikh <furquan@google.com>
+
+commit 7e75678d23167c2527e655658a8ef36a36c8b4d9 upstream.
+
+persistent_ram_update() uses vmap or iomap depending on whether the
+buffer is in a memory region or a reserved region. However, both map it
+as non-cacheable memory. On armv8 specifically, a non-cacheable mapping
+uses a memory type that has to be accessed aligned to the request size.
+memcpy() doesn't guarantee that.
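+
+A generic usage sketch of the write side (hypothetical names, not part
+of this patch):
+
+	void __iomem *va = ioremap(phys_addr, len);
+
+	if (va) {
+		/* memcpy_toio() issues only aligned writes; an unaligned
+		 * plain memcpy() to a non-cacheable armv8 mapping can fault.
+		 */
+		memcpy_toio(va, local_buf, len);
+		iounmap(va);
+	}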
+
+Signed-off-by: Furquan Shaikh <furquan@google.com>
+Signed-off-by: Enric Balletbo Serra <enric.balletbo@collabora.com>
+Reviewed-by: Aaron Durbin <adurbin@chromium.org>
+Reviewed-by: Olof Johansson <olofj@chromium.org>
+Tested-by: Furquan Shaikh <furquan@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -263,7 +263,7 @@ static void notrace persistent_ram_updat
+ const void *s, unsigned int start, unsigned int count)
+ {
+ struct persistent_ram_buffer *buffer = prz->buffer;
+- memcpy(buffer->data + start, s, count);
++ memcpy_toio(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+ }
+
--- /dev/null
+From 4407de74df18ed405cc5998990004c813ccfdbde Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 8 Sep 2016 13:48:05 +0200
+Subject: pstore/ramoops: fixup driver removal
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 4407de74df18ed405cc5998990004c813ccfdbde upstream.
+
+A basic rmmod of ramoops segfaults. Let's see why.
+
+Commit 34f0ec82e0a9 ("pstore: Correct the max_dump_cnt clearing of
+ramoops") sets ->max_dump_cnt to zero before looping over ->przs, but we
+didn't use it before that either.
+
+And since commit ee1d267423a1 ("pstore: add pstore unregister") we free
+that memory on rmmod.
+
+But even then, we looped until we hit a NULL pointer or an ERR value,
+and I don't see where it is ensured that the last member is NULL. Let's
+try this instead: simplify error recovery and freeing. In the error
+case, clean up only the resources that were actually allocated; in the
+free path, rely on ->max_dump_cnt.
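+
+For clarity, the resulting free path, consolidated from the hunks below
+(sketch):
+
+	static void ramoops_free_przs(struct ramoops_context *cxt)
+	{
+		int i;
+
+		if (!cxt->przs)
+			return;
+
+		/* Only entries below ->max_dump_cnt were ever allocated. */
+		for (i = 0; i < cxt->max_dump_cnt; i++)
+			persistent_ram_free(cxt->przs[i]);
+
+		kfree(cxt->przs);
+		cxt->max_dump_cnt = 0;
+	}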
+
+Cc: Anton Vorontsov <anton@enomsg.org>
+Cc: Colin Cross <ccross@android.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -375,13 +375,14 @@ static void ramoops_free_przs(struct ram
+ {
+ int i;
+
+- cxt->max_dump_cnt = 0;
+ if (!cxt->przs)
+ return;
+
+- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
++ for (i = 0; i < cxt->max_dump_cnt; i++)
+ persistent_ram_free(cxt->przs[i]);
++
+ kfree(cxt->przs);
++ cxt->max_dump_cnt = 0;
+ }
+
+ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+@@ -406,7 +407,7 @@ static int ramoops_init_przs(struct devi
+ GFP_KERNEL);
+ if (!cxt->przs) {
+ dev_err(dev, "failed to initialize a prz array for dumps\n");
+- goto fail_prz;
++ goto fail_mem;
+ }
+
+ for (i = 0; i < cxt->max_dump_cnt; i++) {
+@@ -417,6 +418,11 @@ static int ramoops_init_przs(struct devi
+ err = PTR_ERR(cxt->przs[i]);
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+ cxt->record_size, (unsigned long long)*paddr, err);
++
++ while (i > 0) {
++ i--;
++ persistent_ram_free(cxt->przs[i]);
++ }
+ goto fail_prz;
+ }
+ *paddr += cxt->record_size;
+@@ -424,7 +430,9 @@ static int ramoops_init_przs(struct devi
+
+ return 0;
+ fail_prz:
+- ramoops_free_przs(cxt);
++ kfree(cxt->przs);
++fail_mem:
++ cxt->max_dump_cnt = 0;
+ return err;
+ }
+
+@@ -583,7 +591,6 @@ static int ramoops_remove(struct platfor
+ struct ramoops_context *cxt = &oops_cxt;
+
+ pstore_unregister(&cxt->pstore);
+- cxt->max_dump_cnt = 0;
+
+ kfree(cxt->pstore.buf);
+ cxt->pstore.bufsize = 0;
mmc-sdhci-cast-unsigned-int-to-unsigned-long-long-to-avoid-unexpeted-error.patch
pci-mark-atheros-ar9580-to-avoid-bus-reset.patch
platform-don-t-return-0-from-platform_get_irq-on-error.patch
+cpufreq-intel_pstate-fix-unsafe-hwp-msr-access.patch
+parisc-increase-kernel_initial_size-for-32-bit-smp-kernels.patch
+parisc-fix-kernel-memory-layout-regarding-position-of-__gp.patch
+parisc-increase-initial-kernel-mapping-size.patch
+pstore-ramoops-fixup-driver-removal.patch
+pstore-core-drop-cmpxchg-based-updates.patch
+pstore-ram-use-memcpy_toio-instead-of-memcpy.patch
+pstore-ram-use-memcpy_fromio-to-save-old-buffer.patch