--- /dev/null
+From 3510c7aa069aa83a2de6dab2b41401a198317bdc Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 6 Nov 2017 20:16:50 +0100
+Subject: ALSA: seq: Avoid invalid lockdep class warning
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 3510c7aa069aa83a2de6dab2b41401a198317bdc upstream.
+
+The recent fix for adding rwsem nesting annotation was using the given
+"hop" argument as the lock subclass key. Although the idea itself
+works, it may trigger a kernel warning like:
+ BUG: looking up invalid subclass: 8
+ ....
+since the lockdep has a smaller number of subclasses (8) than we
+currently allow for the hops there (10).
+
+The current definition is merely a sanity check for avoiding the too
+deep delivery paths, and the 8 hops are already enough. So, as a
+quick fix, just follow the max hops as same as the max lockdep
+subclasses.
+
+Fixes: 1f20f9ff57ca ("ALSA: seq: Fix nested rwsem annotation for lockdep splat")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/seq_kernel.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/sound/seq_kernel.h
++++ b/include/sound/seq_kernel.h
+@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_
+ #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
+
+ /* max delivery path length */
+-#define SNDRV_SEQ_MAX_HOPS 10
++/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
++#define SNDRV_SEQ_MAX_HOPS 8
+
+ /* max size of event size */
+ #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
--- /dev/null
+From 132d358b183ac6ad8b3fea32ad5e0663456d18d1 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 7 Nov 2017 16:05:24 +0100
+Subject: ALSA: seq: Fix OSS sysex delivery in OSS emulation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 132d358b183ac6ad8b3fea32ad5e0663456d18d1 upstream.
+
+The SYSEX event delivery in OSS sequencer emulation assumed that the
+event is encoded in the variable-length data with the straight
+buffering. This was the normal behavior in the past, but during the
+development, the chained buffers were introduced for carrying more
+data, while the OSS code was left intact. As a result, when a SYSEX
+event with the chained buffer data is passed to OSS sequencer port,
+it may end up with the wrong memory access, as if it were having a too
+large buffer.
+
+This patch addresses the bug, by applying the buffer data expansion by
+the generic snd_seq_dump_var_event() helper function.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Reported-by: Mark Salyzyn <salyzyn@android.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/oss/seq_oss_midi.c | 4 +---
+ sound/core/seq/oss/seq_oss_readq.c | 29 +++++++++++++++++++++++++++++
+ sound/core/seq/oss/seq_oss_readq.h | 2 ++
+ 3 files changed, 32 insertions(+), 3 deletions(-)
+
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *
+ if (!dp->timer->running)
+ len = snd_seq_oss_timer_start(dp->timer);
+ if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
+- ev->data.ext.ptr, ev->data.ext.len);
++ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+ } else {
+ len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
+ if (len > 0)
+--- a/sound/core/seq/oss/seq_oss_readq.c
++++ b/sound/core/seq/oss/seq_oss_readq.c
+@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_re
+ }
+
+ /*
++ * put MIDI sysex bytes; the event buffer may be chained, thus it has
++ * to be expanded via snd_seq_dump_var_event().
++ */
++struct readq_sysex_ctx {
++ struct seq_oss_readq *readq;
++ int dev;
++};
++
++static int readq_dump_sysex(void *ptr, void *buf, int count)
++{
++ struct readq_sysex_ctx *ctx = ptr;
++
++ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
++}
++
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++ struct snd_seq_event *ev)
++{
++ struct readq_sysex_ctx ctx = {
++ .readq = q,
++ .dev = dev
++ };
++
++ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
++ return 0;
++ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
++}
++
++/*
+ * copy an event to input queue:
+ * return zero if enqueued
+ */
+--- a/sound/core/seq/oss/seq_oss_readq.h
++++ b/sound/core/seq/oss/seq_oss_readq.h
+@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq
+ void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
+ unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
+ int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++ struct snd_seq_event *ev);
+ int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
+ int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
+ int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
--- /dev/null
+From b9dd05c7002ee0ca8b676428b2268c26399b5e31 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 2 Nov 2017 18:44:28 +0100
+Subject: ARM: 8720/1: ensure dump_instr() checks addr_limit
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b9dd05c7002ee0ca8b676428b2268c26399b5e31 upstream.
+
+When CONFIG_DEBUG_USER is enabled, it's possible for a user to
+deliberately trigger dump_instr() with a chosen kernel address.
+
+Let's avoid problems resulting from this by using get_user() rather than
+__get_user(), ensuring that we don't erroneously access kernel memory.
+
+So that we can use the same code to dump user instructions and kernel
+instructions, the common dumping code is factored out to __dump_instr(),
+with the fs manipulated appropriately in dump_instr() around calls to
+this.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/traps.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -152,30 +152,26 @@ static void dump_mem(const char *lvl, co
+ set_fs(fs);
+ }
+
+-static void dump_instr(const char *lvl, struct pt_regs *regs)
++static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ {
+ unsigned long addr = instruction_pointer(regs);
+ const int thumb = thumb_mode(regs);
+ const int width = thumb ? 4 : 8;
+- mm_segment_t fs;
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
+ /*
+- * We need to switch to kernel mode so that we can use __get_user
+- * to safely read from kernel space. Note that we now dump the
+- * code first, just in case the backtrace kills us.
++ * Note that we now dump the code first, just in case the backtrace
++ * kills us.
+ */
+- fs = get_fs();
+- set_fs(KERNEL_DS);
+
+ for (i = -4; i < 1 + !!thumb; i++) {
+ unsigned int val, bad;
+
+ if (thumb)
+- bad = __get_user(val, &((u16 *)addr)[i]);
++ bad = get_user(val, &((u16 *)addr)[i]);
+ else
+- bad = __get_user(val, &((u32 *)addr)[i]);
++ bad = get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+@@ -186,8 +182,20 @@ static void dump_instr(const char *lvl,
+ }
+ }
+ printk("%sCode: %s\n", lvl, str);
++}
+
+- set_fs(fs);
++static void dump_instr(const char *lvl, struct pt_regs *regs)
++{
++ mm_segment_t fs;
++
++ if (!user_mode(regs)) {
++ fs = get_fs();
++ set_fs(KERNEL_DS);
++ __dump_instr(lvl, regs);
++ set_fs(fs);
++ } else {
++ __dump_instr(lvl, regs);
++ }
+ }
+
+ #ifdef CONFIG_ARM_UNWIND
--- /dev/null
+From 441f99c90497e15aa3ad1dbabd56187e29614348 Mon Sep 17 00:00:00 2001
+From: Romain Izard <romain.izard.pro@gmail.com>
+Date: Tue, 31 Oct 2017 15:42:35 +0100
+Subject: crypto: ccm - preserve the IV buffer
+
+From: Romain Izard <romain.izard.pro@gmail.com>
+
+commit 441f99c90497e15aa3ad1dbabd56187e29614348 upstream.
+
+The IV buffer used during CCM operations is used twice, during both the
+hashing step and the ciphering step.
+
+When using a hardware accelerator that updates the contents of the IV
+buffer at the end of ciphering operations, the value will be modified.
+In the decryption case, the subsequent setup of the hashing algorithm
+will interpret the updated IV instead of the original value, which can
+lead to out-of-bounds writes.
+
+Reuse the idata buffer, only used in the hashing step, to preserve the
+IV's value during the ciphering step in the decryption case.
+
+Signed-off-by: Romain Izard <romain.izard.pro@gmail.com>
+Reviewed-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ccm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -413,7 +413,7 @@ static int crypto_ccm_decrypt(struct aea
+ unsigned int cryptlen = req->cryptlen;
+ u8 *authtag = pctx->auth_tag;
+ u8 *odata = pctx->odata;
+- u8 *iv = req->iv;
++ u8 *iv = pctx->idata;
+ int err;
+
+ cryptlen -= authsize;
+@@ -429,6 +429,8 @@ static int crypto_ccm_decrypt(struct aea
+ if (req->src != req->dst)
+ dst = pctx->dst;
+
++ memcpy(iv, req->iv, 16);
++
+ skcipher_request_set_tfm(skreq, ctx->ctr);
+ skcipher_request_set_callback(skreq, pctx->flags,
+ crypto_ccm_decrypt_done, req);
--- /dev/null
+From d041b557792c85677f17e08eee535eafbd6b9aa2 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Mon, 16 Oct 2017 18:51:31 +0300
+Subject: crypto: x86/sha1-mb - fix panic due to unaligned access
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit d041b557792c85677f17e08eee535eafbd6b9aa2 upstream.
+
+struct sha1_ctx_mgr allocated in sha1_mb_mod_init() via kzalloc()
+and later passed in sha1_mb_flusher_mgr_flush_avx2() function where
+instructions vmovdqa used to access the struct. vmovdqa requires
+16-bytes aligned argument, but nothing guarantees that struct
+sha1_ctx_mgr will have that alignment. Unaligned vmovdqa will
+generate GP fault.
+
+Fix this by replacing vmovdqa with vmovdqu which doesn't have alignment
+requirements.
+
+Fixes: 2249cbb53ead ("crypto: sha-mb - SHA1 multibuffer submit and flush routines for AVX2")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+@@ -157,8 +157,8 @@ LABEL skip_ %I
+ .endr
+
+ # Find min length
+- vmovdqa _lens+0*16(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens+0*16(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+@@ -178,8 +178,8 @@ LABEL skip_ %I
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+- vmovdqa %xmm0, _lens+0*16(state)
+- vmovdqa %xmm1, _lens+1*16(state)
++ vmovdqu %xmm0, _lens+0*16(state)
++ vmovdqu %xmm1, _lens+1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+ jc .return_null
+
+ # Find min length
+- vmovdqa _lens(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
--- /dev/null
+From 5dfeaac15f2b1abb5a53c9146041c7235eb9aa04 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Mon, 16 Oct 2017 18:51:30 +0300
+Subject: crypto: x86/sha256-mb - fix panic due to unaligned access
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 5dfeaac15f2b1abb5a53c9146041c7235eb9aa04 upstream.
+
+struct sha256_ctx_mgr allocated in sha256_mb_mod_init() via kzalloc()
+and later passed in sha256_mb_flusher_mgr_flush_avx2() function where
+instructions vmovdqa used to access the struct. vmovdqa requires
+16-bytes aligned argument, but nothing guarantees that struct
+sha256_ctx_mgr will have that alignment. Unaligned vmovdqa will
+generate GP fault.
+
+Fix this by replacing vmovdqa with vmovdqu which doesn't have alignment
+requirements.
+
+Fixes: a377c6b1876e ("crypto: sha256-mb - submit/flush routines for AVX2")
+Reported-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Tim Chen
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -155,8 +155,8 @@ LABEL skip_ %I
+ .endr
+
+ # Find min length
+- vmovdqa _lens+0*16(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens+0*16(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+@@ -176,8 +176,8 @@ LABEL skip_ %I
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+- vmovdqa %xmm0, _lens+0*16(state)
+- vmovdqa %xmm1, _lens+1*16(state)
++ vmovdqu %xmm0, _lens+0*16(state)
++ vmovdqu %xmm1, _lens+1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+ jc .return_null
+
+ # Find min length
+- vmovdqa _lens(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
--- /dev/null
+From 8777b927b92cf5b6c29f9f9d3c737addea9ac8a7 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Thu, 19 Oct 2017 17:13:40 +0200
+Subject: drm/i915: Do not rely on wm preservation for ILK watermarks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit 8777b927b92cf5b6c29f9f9d3c737addea9ac8a7 upstream.
+
+The original intent was to preserve watermarks as much as possible
+in intel_pipe_wm.raw_wm, and put the validated ones in intel_pipe_wm.wm.
+
+It seems this approach is insufficient and we don't always preserve
+the raw watermarks, so just use the atomic iterator we're already using
+to get a const pointer to all bound planes on the crtc.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102373
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171019151341.4579-1-maarten.lankhorst@linux.intel.com
+(cherry picked from commit 28283f4f359cd7cfa9e65457bb98c507a2cd0cd0)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_drv.h | 1
+ drivers/gpu/drm/i915/intel_pm.c | 52 ++++++++++++++++-----------------------
+ 2 files changed, 22 insertions(+), 31 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -457,7 +457,6 @@ struct intel_crtc_scaler_state {
+
+ struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+- struct intel_wm_level raw_wm[5];
+ uint32_t linetime;
+ bool fbc_wm_enabled;
+ bool pipe_enabled;
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -27,6 +27,7 @@
+
+ #include <linux/cpufreq.h>
+ #include <drm/drm_plane_helper.h>
++#include <drm/drm_atomic_helper.h>
+ #include "i915_drv.h"
+ #include "intel_drv.h"
+ #include "../../../platform/x86/intel_ips.h"
+@@ -2017,9 +2018,9 @@ static void ilk_compute_wm_level(const s
+ const struct intel_crtc *intel_crtc,
+ int level,
+ struct intel_crtc_state *cstate,
+- struct intel_plane_state *pristate,
+- struct intel_plane_state *sprstate,
+- struct intel_plane_state *curstate,
++ const struct intel_plane_state *pristate,
++ const struct intel_plane_state *sprstate,
++ const struct intel_plane_state *curstate,
+ struct intel_wm_level *result)
+ {
+ uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+@@ -2341,28 +2342,24 @@ static int ilk_compute_pipe_wm(struct in
+ struct intel_pipe_wm *pipe_wm;
+ struct drm_device *dev = state->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+- struct intel_plane *intel_plane;
+- struct intel_plane_state *pristate = NULL;
+- struct intel_plane_state *sprstate = NULL;
+- struct intel_plane_state *curstate = NULL;
++ struct drm_plane *plane;
++ const struct drm_plane_state *plane_state;
++ const struct intel_plane_state *pristate = NULL;
++ const struct intel_plane_state *sprstate = NULL;
++ const struct intel_plane_state *curstate = NULL;
+ int level, max_level = ilk_wm_max_level(dev), usable_level;
+ struct ilk_wm_maximums max;
+
+ pipe_wm = &cstate->wm.ilk.optimal;
+
+- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+- struct intel_plane_state *ps;
++ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
++ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+
+- ps = intel_atomic_get_existing_plane_state(state,
+- intel_plane);
+- if (!ps)
+- continue;
+-
+- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
++ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = ps;
+- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
++ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = ps;
+- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
++ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ curstate = ps;
+ }
+
+@@ -2384,11 +2381,9 @@ static int ilk_compute_pipe_wm(struct in
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
+
+- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+-
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
++ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
++ pristate, sprstate, curstate, &pipe_wm->wm[0]);
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+@@ -2398,8 +2393,8 @@ static int ilk_compute_pipe_wm(struct in
+
+ ilk_compute_wm_reg_maximums(dev, 1, &max);
+
+- for (level = 1; level <= max_level; level++) {
+- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
++ for (level = 1; level <= usable_level; level++) {
++ struct intel_wm_level *wm = &pipe_wm->wm[level];
+
+ ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ pristate, sprstate, curstate, wm);
+@@ -2409,13 +2404,10 @@ static int ilk_compute_pipe_wm(struct in
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+- if (level > usable_level)
+- continue;
+-
+- if (ilk_validate_wm_level(level, &max, wm))
+- pipe_wm->wm[level] = *wm;
+- else
+- usable_level = level;
++ if (!ilk_validate_wm_level(level, &max, wm)) {
++ memset(wm, 0, sizeof(*wm));
++ break;
++ }
+ }
+
+ return 0;
--- /dev/null
+From 624f5ab8720b3371367327a822c267699c1823b8 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 7 Nov 2017 22:29:02 +0000
+Subject: KEYS: fix NULL pointer dereference during ASN.1 parsing [ver #2]
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 624f5ab8720b3371367327a822c267699c1823b8 upstream.
+
+syzkaller reported a NULL pointer dereference in asn1_ber_decoder(). It
+can be reproduced by the following command, assuming
+CONFIG_PKCS7_TEST_KEY=y:
+
+ keyctl add pkcs7_test desc '' @s
+
+The bug is that if the data buffer is empty, an integer underflow occurs
+in the following check:
+
+ if (unlikely(dp >= datalen - 1))
+ goto data_overrun_error;
+
+This results in the NULL data pointer being dereferenced.
+
+Fix it by checking for 'datalen - dp < 2' instead.
+
+Also fix the similar check for 'dp >= datalen - n' later in the same
+function. That one possibly could result in a buffer overread.
+
+The NULL pointer dereference was reproducible using the "pkcs7_test" key
+type but not the "asymmetric" key type because the "asymmetric" key type
+checks for a 0-length payload before calling into the ASN.1 decoder but
+the "pkcs7_test" key type does not.
+
+The bug report was:
+
+ BUG: unable to handle kernel NULL pointer dereference at (null)
+ IP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233
+ PGD 7b708067 P4D 7b708067 PUD 7b6ee067 PMD 0
+ Oops: 0000 [#1] SMP
+ Modules linked in:
+ CPU: 0 PID: 522 Comm: syz-executor1 Not tainted 4.14.0-rc8 #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.3-20171021_125229-anatol 04/01/2014
+ task: ffff9b6b3798c040 task.stack: ffff9b6b37970000
+ RIP: 0010:asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233
+ RSP: 0018:ffff9b6b37973c78 EFLAGS: 00010216
+ RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000000000021c
+ RDX: ffffffff814a04ed RSI: ffffb1524066e000 RDI: ffffffff910759e0
+ RBP: ffff9b6b37973d60 R08: 0000000000000001 R09: ffff9b6b3caa4180
+ R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000002
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ FS: 00007f10ed1f2700(0000) GS:ffff9b6b3ea00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000000 CR3: 000000007b6f3000 CR4: 00000000000006f0
+ Call Trace:
+ pkcs7_parse_message+0xee/0x240 crypto/asymmetric_keys/pkcs7_parser.c:139
+ verify_pkcs7_signature+0x33/0x180 certs/system_keyring.c:216
+ pkcs7_preparse+0x41/0x70 crypto/asymmetric_keys/pkcs7_key_type.c:63
+ key_create_or_update+0x180/0x530 security/keys/key.c:855
+ SYSC_add_key security/keys/keyctl.c:122 [inline]
+ SyS_add_key+0xbf/0x250 security/keys/keyctl.c:62
+ entry_SYSCALL_64_fastpath+0x1f/0xbe
+ RIP: 0033:0x4585c9
+ RSP: 002b:00007f10ed1f1bd8 EFLAGS: 00000216 ORIG_RAX: 00000000000000f8
+ RAX: ffffffffffffffda RBX: 00007f10ed1f2700 RCX: 00000000004585c9
+ RDX: 0000000020000000 RSI: 0000000020008ffb RDI: 0000000020008000
+ RBP: 0000000000000000 R08: ffffffffffffffff R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000216 R12: 00007fff1b2260ae
+ R13: 00007fff1b2260af R14: 00007f10ed1f2700 R15: 0000000000000000
+ Code: dd ca ff 48 8b 45 88 48 83 e8 01 4c 39 f0 0f 86 a8 07 00 00 e8 53 dd ca ff 49 8d 46 01 48 89 85 58 ff ff ff 48 8b 85 60 ff ff ff <42> 0f b6 0c 30 89 c8 88 8d 75 ff ff ff 83 e0 1f 89 8d 28 ff ff
+ RIP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233 RSP: ffff9b6b37973c78
+ CR2: 0000000000000000
+
+Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/asn1_decoder.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -228,7 +228,7 @@ next_op:
+ hdr = 2;
+
+ /* Extract a tag from the data */
+- if (unlikely(dp >= datalen - 1))
++ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ tag = data[dp++];
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
+@@ -274,7 +274,7 @@ next_op:
+ int n = len - 0x80;
+ if (unlikely(n > 2))
+ goto length_too_long;
+- if (unlikely(dp >= datalen - n))
++ if (unlikely(n > datalen - dp))
+ goto data_overrun_error;
+ hdr += n;
+ for (len = 0; n > 0; n--) {
--- /dev/null
+From 6a6cba1d945a7511cdfaf338526871195e420762 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 31 Oct 2017 15:09:22 -0700
+Subject: MIPS: Fix CM region target definitions
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit 6a6cba1d945a7511cdfaf338526871195e420762 upstream.
+
+The default CM target field in the GCR_BASE register is encoded with 0
+meaning memory & 1 being reserved. However the definitions we use for
+those bits effectively get these two values backwards - likely because
+they were copied from the definitions for the CM regions where the
+target is encoded differently. This results in us setting up GCR_BASE
+with the reserved target value by default, rather than targeting memory
+as intended. Although we currently seem to get away with this it's not a
+great idea to rely upon.
+
+Fix this by changing our macros to match the documented target values.
+
+The incorrect encoding became used as of commit 9f98f3dd0c51 ("MIPS: Add
+generic CM probe & access code") in the Linux v3.15 cycle, and was
+likely carried forwards from older but unused code introduced by
+commit 39b8d5254246 ("[MIPS] Add support for MIPS CMP platform.") in the
+v2.6.26 cycle.
+
+Fixes: 9f98f3dd0c51 ("MIPS: Add generic CM probe & access code")
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Reported-by: Matt Redfearn <matt.redfearn@mips.com>
+Reviewed-by: James Hogan <jhogan@kernel.org>
+Cc: Matt Redfearn <matt.redfearn@mips.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # v3.15+
+Patchwork: https://patchwork.linux-mips.org/patch/17562/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+[jhogan@kernel.org: Backported 3.15..4.13]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/mips-cm.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -239,8 +239,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
+ #define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
+ #define CM_GCR_BASE_CMDEFTGT_SHF 0
+ #define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
+-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
+-#define CM_GCR_BASE_CMDEFTGT_MEM 1
++#define CM_GCR_BASE_CMDEFTGT_MEM 0
++#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
+ #define CM_GCR_BASE_CMDEFTGT_IOCU0 2
+ #define CM_GCR_BASE_CMDEFTGT_IOCU1 3
+
--- /dev/null
+From 6f542ebeaee0ee552a902ce3892220fc22c7ec8e Mon Sep 17 00:00:00 2001
+From: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
+Date: Thu, 3 Aug 2017 08:20:22 +0200
+Subject: MIPS: Fix race on setting and getting cpu_online_mask
+
+From: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
+
+commit 6f542ebeaee0ee552a902ce3892220fc22c7ec8e upstream.
+
+While testing cpu hotplug (cpu down and up in loops) on kernel 4.4, it was
+observed that occasionally check for cpu online will fail in kernel/cpu.c,
+_cpu_up:
+
+https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/tree/kernel/cpu.c?h=v4.4.79#n485
+ 518 /* Arch-specific enabling code. */
+ 519 ret = __cpu_up(cpu, idle);
+ 520
+ 521 if (ret != 0)
+ 522 goto out_notify;
+ 523 BUG_ON(!cpu_online(cpu));
+
+Reason is race between start_secondary and _cpu_up. cpu_callin_map is set
+before cpu_online_mask. In __cpu_up, cpu_callin_map is waited for, but cpu
+online mask is not, resulting in race in which secondary processor started
+and set cpu_callin_map, but not yet set the online mask,resulting in above
+BUG being hit.
+
+Upstream differs in the area. cpu_online check is in bringup_wait_for_ap,
+which is after cpu reached AP_ONLINE_IDLE,where secondary passed its start
+function. Nonetheless, fix makes start_secondary safe and not depending on
+other locks throughout the code. It protects as well against cpu_online
+checks put in between sometimes in the future.
+
+Fix this by moving completion after all flags are set.
+
+Signed-off-by: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
+Cc: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/16925/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/smp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -371,9 +371,6 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
+- complete(&cpu_running);
+- synchronise_count_slave(cpu);
+-
+ set_cpu_online(cpu, true);
+
+ set_cpu_sibling_map(cpu);
+@@ -381,6 +378,9 @@ asmlinkage void start_secondary(void)
+
+ calculate_cpu_foreign_map();
+
++ complete(&cpu_running);
++ synchronise_count_slave(cpu);
++
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
--- /dev/null
+From 77238e76b9156d28d86c1e31c00ed2960df0e4de Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+Date: Tue, 31 Oct 2017 00:35:03 -0500
+Subject: MIPS: microMIPS: Fix incorrect mask in insn_table_MM
+
+From: Gustavo A. R. Silva <garsilva@embeddedor.com>
+
+commit 77238e76b9156d28d86c1e31c00ed2960df0e4de upstream.
+
+It seems that this is a typo error and the proper bit masking is
+"RT | RS" instead of "RS | RS".
+
+This issue was detected with the help of Coccinelle.
+
+Fixes: d6b3314b49e1 ("MIPS: uasm: Add lh uam instruction")
+Reported-by: Julia Lawall <julia.lawall@lip6.fr>
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Reviewed-by: James Hogan <jhogan@kernel.org>
+Patchwork: https://patchwork.linux-mips.org/patch/17551/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+[jhogan@kernel.org: Backported 3.16..4.12]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/uasm-micromips.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/mm/uasm-micromips.c
++++ b/arch/mips/mm/uasm-micromips.c
+@@ -80,7 +80,7 @@ static struct insn insn_table_MM[] = {
+ { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+ { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_ld, 0, 0 },
+- { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
++ { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+ { insn_lld, 0, 0 },
+ { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
--- /dev/null
+From 9e8c399a88f0b87e41a894911475ed2a8f8dff9e Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Wed, 27 Sep 2017 10:13:25 +0100
+Subject: MIPS: SMP: Fix deadlock & online race
+
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+
+commit 9e8c399a88f0b87e41a894911475ed2a8f8dff9e upstream.
+
+Commit 6f542ebeaee0 ("MIPS: Fix race on setting and getting
+cpu_online_mask") effectively reverted commit 8f46cca1e6c06 ("MIPS: SMP:
+Fix possibility of deadlock when bringing CPUs online") and thus has
+reinstated the possibility of deadlock.
+
+The commit was based on testing of kernel v4.4, where the CPU hotplug
+core code issued a BUG() if the starting CPU is not marked online when
+the boot CPU returns from __cpu_up. The commit fixes this race (in
+v4.4), but re-introduces the deadlock situation.
+
+As noted in the commit message, upstream differs in this area. Commit
+8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu bring itself fully up")
+adds a completion event in the CPU hotplug core code, making this race
+impossible. However, people were unhappy with relying on the core code
+to do the right thing.
+
+To address the issues both commits were trying to fix, add a second
+completion event in the MIPS smp hotplug path. It removes the
+possibility of a race, since the MIPS smp hotplug code now synchronises
+both the boot and secondary CPUs before they return to the hotplug core
+code. It also addresses the deadlock by ensuring that the secondary CPU
+is not marked online before its counters are synchronised.
+
+This fix should also be backported to fix the race condition introduced
+by the backport of commit 8f46cca1e6c06 ("MIPS: SMP: Fix possibility of
+deadlock when bringing CPUs online"), though really that race only
+existed before commit 8df3e07e7f21f ("cpu/hotplug: Let upcoming cpu
+bring itself fully up").
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Fixes: 6f542ebeaee0 ("MIPS: Fix race on setting and getting cpu_online_mask")
+CC: Matija Glavinic Pecotic <matija.glavinic-pecotic.ext@nokia.com>
+Patchwork: https://patchwork.linux-mips.org/patch/17376/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+[jhogan@kernel.org: Backported 4.1..4.9]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/smp.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -68,6 +68,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+
++static DECLARE_COMPLETION(cpu_starting);
+ static DECLARE_COMPLETION(cpu_running);
+
+ /*
+@@ -371,6 +372,12 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
++ /* Notify boot CPU that we're starting & ready to sync counters */
++ complete(&cpu_starting);
++
++ synchronise_count_slave(cpu);
++
++ /* The CPU is running and counters synchronised, now mark it online */
+ set_cpu_online(cpu, true);
+
+ set_cpu_sibling_map(cpu);
+@@ -378,8 +385,11 @@ asmlinkage void start_secondary(void)
+
+ calculate_cpu_foreign_map();
+
++ /*
++ * Notify boot CPU that we're up & online and it can safely return
++ * from __cpu_up
++ */
+ complete(&cpu_running);
+- synchronise_count_slave(cpu);
+
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+@@ -438,17 +448,17 @@ int __cpu_up(unsigned int cpu, struct ta
+ {
+ mp_ops->boot_secondary(cpu, tidle);
+
+- /*
+- * We must check for timeout here, as the CPU will not be marked
+- * online until the counters are synchronised.
+- */
+- if (!wait_for_completion_timeout(&cpu_running,
++ /* Wait for CPU to start and be ready to sync counters */
++ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ synchronise_count_master(cpu);
++
++ /* Wait for CPU to finish startup & mark itself online before return */
++ wait_for_completion(&cpu_running);
+ return 0;
+ }
+
--- /dev/null
+From a00eeede507c975087b7b8df8cf2c9f88ba285de Mon Sep 17 00:00:00 2001
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+Date: Fri, 4 Nov 2016 09:28:56 +0000
+Subject: MIPS: SMP: Use a completion event to signal CPU up
+
+From: Matt Redfearn <matt.redfearn@imgtec.com>
+
+commit a00eeede507c975087b7b8df8cf2c9f88ba285de upstream.
+
+If a secondary CPU failed to start, for any reason, the CPU requesting
+the secondary to start would get stuck in the loop waiting for the
+secondary to be present in the cpu_callin_map.
+
+Rather than that, use a completion event to signal that the secondary
+CPU has started and is waiting to synchronise counters.
+
+Since the CPU presence will no longer be marked in cpu_callin_map,
+remove the redundant test from arch_cpu_idle_dead().
+
+Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
+Cc: Maciej W. Rozycki <macro@imgtec.com>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Qais Yousef <qsyousef@gmail.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Patchwork: https://patchwork.linux-mips.org/patch/14502/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/process.c | 4 +---
+ arch/mips/kernel/smp.c | 15 +++++++++------
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -50,9 +50,7 @@
+ #ifdef CONFIG_HOTPLUG_CPU
+ void arch_cpu_idle_dead(void)
+ {
+- /* What the heck is this check doing ? */
+- if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
+- play_dead();
++ play_dead();
+ }
+ #endif
+
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -68,6 +68,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+
++static DECLARE_COMPLETION(cpu_running);
++
+ /*
+ * A logcal cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+@@ -369,7 +371,7 @@ asmlinkage void start_secondary(void)
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
+- cpumask_set_cpu(cpu, &cpu_callin_map);
++ complete(&cpu_running);
+ synchronise_count_slave(cpu);
+
+ set_cpu_online(cpu, true);
+@@ -430,7 +432,6 @@ void smp_prepare_boot_cpu(void)
+ {
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+- cpumask_set_cpu(0, &cpu_callin_map);
+ }
+
+ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+@@ -438,11 +439,13 @@ int __cpu_up(unsigned int cpu, struct ta
+ mp_ops->boot_secondary(cpu, tidle);
+
+ /*
+- * Trust is futile. We should really have timeouts ...
++ * We must check for timeout here, as the CPU will not be marked
++ * online until the counters are synchronised.
+ */
+- while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
+- udelay(100);
+- schedule();
++ if (!wait_for_completion_timeout(&cpu_running,
++ msecs_to_jiffies(1000))) {
++ pr_crit("CPU%u: failed to start\n", cpu);
++ return -EIO;
+ }
+
+ synchronise_count_master(cpu);
--- /dev/null
+From d313876925f3e7a480a02773fd333bcab9202d5e Mon Sep 17 00:00:00 2001
+From: Carlo Caione <carlo@endlessm.com>
+Date: Wed, 19 Apr 2017 22:36:39 +0200
+Subject: platform/x86: hp-wmi: Do not shadow error values
+
+From: Carlo Caione <carlo@endlessm.com>
+
+commit d313876925f3e7a480a02773fd333bcab9202d5e upstream.
+
+All the helper functions (i.e. hp_wmi_dock_state, hp_wmi_tablet_state,
+...) using hp_wmi_perform_query to perform an HP WMI query shadow the
+returned value in case of error.
+
+We return -EINVAL only when the HP WMI query returns a positive value
+(the specific error code) to not mix this up with the actual value
+returned by the helper function.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Carlo Caione <carlo@endlessm.com>
+Signed-off-by: Darren Hart (VMware) <dvhart@infradead.org>
+Cc: Philip Müller <philm@manjaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/hp-wmi.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -248,7 +248,7 @@ static int hp_wmi_display_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return state;
+ }
+
+@@ -258,7 +258,7 @@ static int hp_wmi_hddtemp_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return state;
+ }
+
+@@ -268,7 +268,7 @@ static int hp_wmi_als_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return state;
+ }
+
+@@ -279,7 +279,7 @@ static int hp_wmi_dock_state(void)
+ sizeof(state), sizeof(state));
+
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+
+ return state & 0x1;
+ }
+@@ -290,7 +290,7 @@ static int hp_wmi_tablet_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+
+ return (state & 0x4) ? 1 : 0;
+ }
+@@ -323,7 +323,7 @@ static int __init hp_wmi_enable_hotkeys(
+ int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+ sizeof(value), 0);
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return 0;
+ }
+
+@@ -336,7 +336,7 @@ static int hp_wmi_set_block(void *data,
+ ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
+ &query, sizeof(query), 0);
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return 0;
+ }
+
+@@ -428,7 +428,7 @@ static int hp_wmi_post_code_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+ return state;
+ }
+
+@@ -494,7 +494,7 @@ static ssize_t set_als(struct device *de
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
+ sizeof(tmp), sizeof(tmp));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+
+ return count;
+ }
+@@ -515,7 +515,7 @@ static ssize_t set_postcode(struct devic
+ ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
+ sizeof(tmp), sizeof(tmp));
+ if (ret)
+- return -EINVAL;
++ return ret < 0 ? ret : -EINVAL;
+
+ return count;
+ }
--- /dev/null
+From c7dfc2facbd69dad89b75e13c608da709668dcd0 Mon Sep 17 00:00:00 2001
+From: Carlo Caione <carlo@endlessm.com>
+Date: Sun, 9 Apr 2017 15:56:07 +0200
+Subject: platform/x86: hp-wmi: Fix error value for hp_wmi_tablet_state
+
+From: Carlo Caione <carlo@endlessm.com>
+
+commit c7dfc2facbd69dad89b75e13c608da709668dcd0 upstream.
+
+hp_wmi_tablet_state() fails to return the correct error code when
+hp_wmi_perform_query() returns the HP WMI query specific error code
+that is a positive value.
+
+Signed-off-by: Carlo Caione <carlo@endlessm.com>
+Signed-off-by: Darren Hart (VMware) <dvhart@infradead.org>
+Cc: Philip Müller <philm@manjaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/hp-wmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -290,7 +290,7 @@ static int hp_wmi_tablet_state(void)
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (ret)
+- return ret;
++ return -EINVAL;
+
+ return (state & 0x4) ? 1 : 0;
+ }
cdc_ncm-set-ntb-format-again-after-altsetting-switch-for-huawei-devices.patch
keys-trusted-sanitize-all-key-material.patch
keys-trusted-fix-writing-past-end-of-buffer-in-trusted_read.patch
+platform-x86-hp-wmi-fix-error-value-for-hp_wmi_tablet_state.patch
+platform-x86-hp-wmi-do-not-shadow-error-values.patch
+x86-uaccess-sched-preempt-verify-access_ok-context.patch
+workqueue-fix-null-pointer-dereference.patch
+crypto-ccm-preserve-the-iv-buffer.patch
+crypto-x86-sha1-mb-fix-panic-due-to-unaligned-access.patch
+crypto-x86-sha256-mb-fix-panic-due-to-unaligned-access.patch
+keys-fix-null-pointer-dereference-during-asn.1-parsing.patch
+arm-8720-1-ensure-dump_instr-checks-addr_limit.patch
+alsa-seq-fix-oss-sysex-delivery-in-oss-emulation.patch
+alsa-seq-avoid-invalid-lockdep-class-warning.patch
+drm-i915-do-not-rely-on-wm-preservation-for-ilk-watermarks.patch
+mips-micromips-fix-incorrect-mask-in-insn_table_mm.patch
+mips-fix-cm-region-target-definitions.patch
+mips-smp-use-a-completion-event-to-signal-cpu-up.patch
+mips-fix-race-on-setting-and-getting-cpu_online_mask.patch
+mips-smp-fix-deadlock-online-race.patch
--- /dev/null
+From cef572ad9bd7f85035ba8272e5352040e8be0152 Mon Sep 17 00:00:00 2001
+From: Li Bin <huawei.libin@huawei.com>
+Date: Sat, 28 Oct 2017 11:07:28 +0800
+Subject: workqueue: Fix NULL pointer dereference
+
+From: Li Bin <huawei.libin@huawei.com>
+
+commit cef572ad9bd7f85035ba8272e5352040e8be0152 upstream.
+
+When queue_work() is used in irq (not in task context), there is
+a potential case that triggers a NULL pointer dereference.
+----------------------------------------------------------------
+worker_thread()
+|-spin_lock_irq()
+|-process_one_work()
+ |-worker->current_pwq = pwq
+ |-spin_unlock_irq()
+ |-worker->current_func(work)
+ |-spin_lock_irq()
+ |-worker->current_pwq = NULL
+|-spin_unlock_irq()
+
+ //interrupt here
+ |-irq_handler
+ |-__queue_work()
+ //assuming that the wq is draining
+ |-is_chained_work(wq)
+ |-current_wq_worker()
+ //Here, 'current' is the interrupted worker!
+ |-current->current_pwq is NULL here!
+|-schedule()
+----------------------------------------------------------------
+
+Avoid it by checking for task context in current_wq_worker(), and
+if not in task context, we shouldn't use the 'current' to check the
+condition.
+
+Reported-by: Xiaofei Tan <tanxiaofei@huawei.com>
+Signed-off-by: Li Bin <huawei.libin@huawei.com>
+Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: 8d03ecfe4718 ("workqueue: reimplement is_chained_work() using current_wq_worker()")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue_internal.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -9,6 +9,7 @@
+
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
++#include <linux/preempt.h>
+
+ struct worker_pool;
+
+@@ -59,7 +60,7 @@ struct worker {
+ */
+ static inline struct worker *current_wq_worker(void)
+ {
+- if (current->flags & PF_WQ_WORKER)
++ if (in_task() && (current->flags & PF_WQ_WORKER))
+ return kthread_data(current);
+ return NULL;
+ }
--- /dev/null
+From 7c4788950ba5922fde976d80b72baf46f14dee8d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 22 Nov 2016 10:57:15 +0100
+Subject: x86/uaccess, sched/preempt: Verify access_ok() context
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 7c4788950ba5922fde976d80b72baf46f14dee8d upstream.
+
+I recently encountered wreckage because access_ok() was used where it
+should not be, add an explicit WARN when access_ok() is used wrongly.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/uaccess.h | 13 +++++++++++--
+ include/linux/preempt.h | 21 +++++++++++++--------
+ 2 files changed, 24 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -68,6 +68,12 @@ static inline bool __chk_range_not_ok(un
+ __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
+ })
+
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
++#else
++# define WARN_ON_IN_IRQ()
++#endif
++
+ /**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+@@ -88,8 +94,11 @@ static inline bool __chk_range_not_ok(un
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) \
+- likely(!__range_not_ok(addr, size, user_addr_max()))
++#define access_ok(type, addr, size) \
++({ \
++ WARN_ON_IN_IRQ(); \
++ likely(!__range_not_ok(addr, size, user_addr_max())); \
++})
+
+ /*
+ * These are the main single-value transfer routines. They automatically
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -65,19 +65,24 @@
+
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+- * Are we in a softirq context? Interrupt context?
+- * in_softirq - Are we currently processing softirq or have bh disabled?
+- * in_serving_softirq - Are we currently processing softirq?
++ *
++ * in_irq() - We're in (hard) IRQ context
++ * in_softirq() - We have BH disabled, or are processing softirqs
++ * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
++ * in_serving_softirq() - We're in softirq context
++ * in_nmi() - We're in NMI context
++ * in_task() - We're in task context
++ *
++ * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
++ * should not be used in new code.
+ */
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+ #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+-
+-/*
+- * Are we in NMI context?
+- */
+-#define in_nmi() (preempt_count() & NMI_MASK)
++#define in_nmi() (preempt_count() & NMI_MASK)
++#define in_task() (!(preempt_count() & \
++ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+
+ /*
+ * The preempt_count offset after preempt_disable();