--- /dev/null
+From 71630b7a832f699d6a6764ae75797e4e743ae348 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 6 Nov 2017 23:56:57 +0100
+Subject: ACPI / PM: Blacklist Low Power S0 Idle _DSM for Dell XPS13 9360
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 71630b7a832f699d6a6764ae75797e4e743ae348 upstream.
+
+At least one Dell XPS13 9360 is reported to have serious issues with
+the Low Power S0 Idle _DSM interface, and since this machine model
+generally can do ACPI S3 just fine, add a blacklist entry to disable
+that interface for the Dell XPS13 9360.
+
+Fixes: 8110dd281e15 (ACPI / sleep: EC-based wakeup from suspend-to-idle on recent systems)
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=196907
+Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/sleep.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const
+ return 0;
+ }
+
++static bool acpi_sleep_no_lps0;
++
++static int __init init_no_lps0(const struct dmi_system_id *d)
++{
++ acpi_sleep_no_lps0 = true;
++ return 0;
++}
++
+ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+ {
+ .callback = init_old_suspend_ordering,
+@@ -343,6 +351,19 @@ static struct dmi_system_id acpisleep_dm
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ },
+ },
++ /*
++ * https://bugzilla.kernel.org/show_bug.cgi?id=196907
++ * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
++ * S0 Idle firmware interface.
++ */
++ {
++ .callback = init_no_lps0,
++ .ident = "Dell XPS13 9360",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
++ },
++ },
+ {},
+ };
+
+@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ #define acpi_target_sleep_state ACPI_STATE_S0
++#define acpi_sleep_no_lps0 (false)
+ static inline void acpi_sleep_dmi_check(void) {}
+ #endif /* CONFIG_ACPI_SLEEP */
+
+@@ -702,6 +724,12 @@ static int lps0_device_attach(struct acp
+ if (lps0_device_handle)
+ return 0;
+
++ if (acpi_sleep_no_lps0) {
++ acpi_handle_info(adev->handle,
++ "Low Power S0 Idle interface disabled\n");
++ return 0;
++ }
++
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return 0;
+
--- /dev/null
+From eb7f43c4adb4a789f99f53916182c3401b4e33c7 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 10 Aug 2017 00:34:23 +0200
+Subject: ACPI / scan: Enable GPEs before scanning the namespace
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit eb7f43c4adb4a789f99f53916182c3401b4e33c7 upstream.
+
+On some systems the platform firmware expects GPEs to be enabled
+before the enumeration of devices, and if that expectation is not
+met, the systems in question may fail to boot in some situations.
+
+For this reason, change the initialization ordering of the ACPI
+subsystem to make it enable GPEs before scanning the namespace
+for the first time in order to enumerate devices.
+
+Reported-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Suggested-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Lv Zheng <lv.zheng@intel.com>
+Tested-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/scan.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2058,6 +2058,9 @@ int __init acpi_scan_init(void)
+ acpi_get_spcr_uart_addr();
+ }
+
++ acpi_gpe_apply_masked_gpes();
++ acpi_update_all_gpes();
++
+ mutex_lock(&acpi_scan_lock);
+ /*
+ * Enumerate devices in the ACPI namespace.
+@@ -2082,9 +2085,6 @@ int __init acpi_scan_init(void)
+ }
+ }
+
+- acpi_gpe_apply_masked_gpes();
+- acpi_update_all_gpes();
+-
+ acpi_scan_initialized = true;
+
+ out:
--- /dev/null
+From ecc1165b8b743fd1503b9c799ae3a9933b89877b Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 10 Aug 2017 00:30:09 +0200
+Subject: ACPICA: Dispatch active GPEs at init time
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit ecc1165b8b743fd1503b9c799ae3a9933b89877b upstream.
+
+In some cases GPEs are already active when they are enabled by
+acpi_ev_initialize_gpe_block() and whatever happens next may depend
+on the result of handling the events signaled by them, so the
+events should not be discarded (which is what happens currently) and
+they should be handled as soon as reasonably possible.
+
+For this reason, modify acpi_ev_initialize_gpe_block() to
+dispatch GPEs with the status flag set in-band right after
+enabling them.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Tested-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpica/evgpeblk.c | 28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/drivers/acpi/acpica/evgpeblk.c
++++ b/drivers/acpi/acpica/evgpeblk.c
+@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi
+ void *ignored)
+ {
+ acpi_status status;
++ acpi_event_status event_status;
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 gpe_enabled_count;
+ u32 gpe_index;
++ u32 gpe_number;
+ u32 i;
+ u32 j;
+
+@@ -470,30 +472,38 @@ acpi_ev_initialize_gpe_block(struct acpi
+
+ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
+ gpe_event_info = &gpe_block->event_info[gpe_index];
++ gpe_number = gpe_block->block_base_number + gpe_index;
+
+ /*
+ * Ignore GPEs that have no corresponding _Lxx/_Exx method
+- * and GPEs that are used to wake the system
++ * and GPEs that are used for wakeup
+ */
+- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+- ACPI_GPE_DISPATCH_NONE)
+- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+- ACPI_GPE_DISPATCH_HANDLER)
+- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+- ACPI_GPE_DISPATCH_RAW_HANDLER)
++ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
++ ACPI_GPE_DISPATCH_METHOD)
+ || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+ continue;
+ }
+
++ event_status = 0;
++ (void)acpi_hw_get_gpe_status(gpe_event_info,
++ &event_status);
++
+ status = acpi_ev_add_gpe_reference(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+- gpe_index +
+- gpe_block->block_base_number));
++ gpe_number));
+ continue;
+ }
+
++ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
++ ACPI_INFO(("GPE 0x%02X active on init",
++ gpe_number));
++ (void)acpi_ev_gpe_dispatch(gpe_block->node,
++ gpe_event_info,
++ gpe_number);
++ }
++
+ gpe_enabled_count++;
+ }
+ }
--- /dev/null
+From 1312b7e0caca44e7ff312bc2eaa888943384e3e1 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 10 Aug 2017 00:31:58 +0200
+Subject: ACPICA: Make it possible to enable runtime GPEs earlier
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 1312b7e0caca44e7ff312bc2eaa888943384e3e1 upstream.
+
+Runtime GPEs have corresponding _Lxx/_Exx methods and are enabled
+automatically during the initialization of the ACPI subsystem through
+acpi_update_all_gpes() with the assumption that acpi_setup_gpe_for_wake()
+will be called in advance for all of the GPEs pointed to by _PRW
+objects in the namespace that may be affected by acpi_update_all_gpes().
+That is, acpi_ev_initialize_gpe_block() can only be called for a GPE
+block after acpi_setup_gpe_for_wake() has been called for all of the
+_PRW (wakeup) GPEs in it.
+
+The platform firmware on some systems, however, expects GPEs to be
+enabled before the enumeration of devices, which is when
+acpi_setup_gpe_for_wake() is called, and that goes against the above
+assumption.
+
+For this reason, introduce a new flag to be set by
+acpi_ev_initialize_gpe_block() when automatically enabling a GPE
+to indicate to acpi_setup_gpe_for_wake() that it needs to drop the
+reference to the GPE coming from acpi_ev_initialize_gpe_block()
+and modify acpi_setup_gpe_for_wake() accordingly. These changes
+allow acpi_setup_gpe_for_wake() and acpi_ev_initialize_gpe_block()
+to be invoked in any order.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Tested-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/acpica/evgpeblk.c | 2 ++
+ drivers/acpi/acpica/evxfgpe.c | 8 ++++++++
+ include/acpi/actypes.h | 3 ++-
+ 3 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/acpica/evgpeblk.c
++++ b/drivers/acpi/acpica/evgpeblk.c
+@@ -496,6 +496,8 @@ acpi_ev_initialize_gpe_block(struct acpi
+ continue;
+ }
+
++ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
++
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake
+ */
+ gpe_event_info->flags =
+ (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
++ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
++ /*
++ * A reference to this GPE has been added during the GPE block
++ * initialization, so drop it now to prevent the GPE from being
++ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
++ */
++ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
++ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
+ }
+
+ /*
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -775,7 +775,7 @@ typedef u32 acpi_event_status;
+ * | | | | +-- Type of dispatch:to method, handler, notify, or none
+ * | | | +----- Interrupt type: edge or level triggered
+ * | | +------- Is a Wake GPE
+- * | +--------- Is GPE masked by the software GPE masking mechanism
++ * | +--------- Has been enabled automatically at init time
+ * +------------ <Reserved>
+ */
+ #define ACPI_GPE_DISPATCH_NONE (u8) 0x00
+@@ -791,6 +791,7 @@ typedef u32 acpi_event_status;
+ #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
+
+ #define ACPI_GPE_CAN_WAKE (u8) 0x10
++#define ACPI_GPE_AUTO_ENABLED (u8) 0x20
+
+ /*
+ * Flags for GPE and Lock interfaces
--- /dev/null
+From 75ee94b20b46459e3d29f5ac2c3af3cebdeef777 Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Thu, 9 Nov 2017 08:48:08 +0800
+Subject: ALSA: hda - fix headset mic problem for Dell machines with alc274
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit 75ee94b20b46459e3d29f5ac2c3af3cebdeef777 upstream.
+
+As confirmed with Kailang of Realtek, pin 0x19 is for the Headset Mic and
+pin 0x1a is for the Headphone Mic; he suggested applying
+ALC269_FIXUP_DELL1_MIC_NO_PRESENCE to fix this problem, and we
+verified that applying this FIXUP does fix it.
+
+Cc: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6527,6 +6527,11 @@ static const struct snd_hda_pin_quirk al
+ {0x14, 0x90170110},
+ {0x1b, 0x90a70130},
+ {0x21, 0x03211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ {0x12, 0xb7a60130},
++ {0x13, 0xb8a61140},
++ {0x16, 0x90170110},
++ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ {0x12, 0x90a60130},
+ {0x14, 0x90170110},
--- /dev/null
+From 3510c7aa069aa83a2de6dab2b41401a198317bdc Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 6 Nov 2017 20:16:50 +0100
+Subject: ALSA: seq: Avoid invalid lockdep class warning
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 3510c7aa069aa83a2de6dab2b41401a198317bdc upstream.
+
+The recent fix for adding rwsem nesting annotation was using the given
+"hop" argument as the lock subclass key. Although the idea itself
+works, it may trigger a kernel warning like:
+ BUG: looking up invalid subclass: 8
+ ....
+since lockdep supports a smaller number of subclasses (8) than the
+number of hops we currently allow there (10).
+
+The current definition is merely a sanity check for avoiding overly
+deep delivery paths, and 8 hops are already enough. So, as a
+quick fix, just cap the max hops at the max number of lockdep
+subclasses.
+
+Fixes: 1f20f9ff57ca ("ALSA: seq: Fix nested rwsem annotation for lockdep splat")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/seq_kernel.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/sound/seq_kernel.h
++++ b/include/sound/seq_kernel.h
+@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_
+ #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
+
+ /* max delivery path length */
+-#define SNDRV_SEQ_MAX_HOPS 10
++/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
++#define SNDRV_SEQ_MAX_HOPS 8
+
+ /* max size of event size */
+ #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
--- /dev/null
+From 132d358b183ac6ad8b3fea32ad5e0663456d18d1 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 7 Nov 2017 16:05:24 +0100
+Subject: ALSA: seq: Fix OSS sysex delivery in OSS emulation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 132d358b183ac6ad8b3fea32ad5e0663456d18d1 upstream.
+
+The SYSEX event delivery in the OSS sequencer emulation assumed that
+the event is encoded as variable-length data in a single straight
+buffer. This was the normal behavior in the past, but during later
+development, chained buffers were introduced for carrying more data,
+while the OSS code was left intact. As a result, when a SYSEX event
+with chained buffer data is passed to an OSS sequencer port, it may
+end up accessing the wrong memory, as if the event had a too large
+buffer.
+
+This patch addresses the bug by expanding the buffer data via the
+generic snd_seq_dump_var_event() helper function.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Reported-by: Mark Salyzyn <salyzyn@android.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/oss/seq_oss_midi.c | 4 +---
+ sound/core/seq/oss/seq_oss_readq.c | 29 +++++++++++++++++++++++++++++
+ sound/core/seq/oss/seq_oss_readq.h | 2 ++
+ 3 files changed, 32 insertions(+), 3 deletions(-)
+
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *
+ if (!dp->timer->running)
+ len = snd_seq_oss_timer_start(dp->timer);
+ if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
+- ev->data.ext.ptr, ev->data.ext.len);
++ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+ } else {
+ len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
+ if (len > 0)
+--- a/sound/core/seq/oss/seq_oss_readq.c
++++ b/sound/core/seq/oss/seq_oss_readq.c
+@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_re
+ }
+
+ /*
++ * put MIDI sysex bytes; the event buffer may be chained, thus it has
++ * to be expanded via snd_seq_dump_var_event().
++ */
++struct readq_sysex_ctx {
++ struct seq_oss_readq *readq;
++ int dev;
++};
++
++static int readq_dump_sysex(void *ptr, void *buf, int count)
++{
++ struct readq_sysex_ctx *ctx = ptr;
++
++ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
++}
++
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++ struct snd_seq_event *ev)
++{
++ struct readq_sysex_ctx ctx = {
++ .readq = q,
++ .dev = dev
++ };
++
++ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
++ return 0;
++ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
++}
++
++/*
+ * copy an event to input queue:
+ * return zero if enqueued
+ */
+--- a/sound/core/seq/oss/seq_oss_readq.h
++++ b/sound/core/seq/oss/seq_oss_readq.h
+@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq
+ void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
+ unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
+ int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++ struct snd_seq_event *ev);
+ int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
+ int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
+ int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
--- /dev/null
+From 9b7d869ee5a77ed4a462372bb89af622e705bfb8 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 5 Nov 2017 10:07:43 +0100
+Subject: ALSA: timer: Limit max instances per timer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 9b7d869ee5a77ed4a462372bb89af622e705bfb8 upstream.
+
+Currently we allow an unlimited number of timer instances, which may
+leave the system hogging way too much CPU when too many timer
+instances are opened and processed concurrently. This may end up with
+a soft-lockup report as triggered by syzkaller, especially when the
+hrtimer backend is deployed.
+
+Since such an insane number of instances isn't demanded by the normal
+use case of the ALSA sequencer and merely opens a risk for abuse,
+this patch introduces an upper limit on the number of instances per
+timer backend. By default it's set to 1000, but for a fine-grained
+timer like hrtimer it's set to 100.
+
+Reported-by: syzbot
+Tested-by: Jérôme Glisse <jglisse@redhat.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/timer.h | 2 +
+ sound/core/hrtimer.c | 1
+ sound/core/timer.c | 67 ++++++++++++++++++++++++++++++++++++++++----------
+ 3 files changed, 57 insertions(+), 13 deletions(-)
+
+--- a/include/sound/timer.h
++++ b/include/sound/timer.h
+@@ -90,6 +90,8 @@ struct snd_timer {
+ struct list_head ack_list_head;
+ struct list_head sack_list_head; /* slow ack list head */
+ struct tasklet_struct task_queue;
++ int max_instances; /* upper limit of timer instances */
++ int num_instances; /* current number of timer instances */
+ };
+
+ struct snd_timer_instance {
+--- a/sound/core/hrtimer.c
++++ b/sound/core/hrtimer.c
+@@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void)
+ timer->hw = hrtimer_hw;
+ timer->hw.resolution = resolution;
+ timer->hw.ticks = NANO_SEC / resolution;
++ timer->max_instances = 100; /* lower the limit */
+
+ err = snd_timer_global_register(timer);
+ if (err < 0) {
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -180,7 +180,7 @@ static void snd_timer_request(struct snd
+ *
+ * call this with register_mutex down.
+ */
+-static void snd_timer_check_slave(struct snd_timer_instance *slave)
++static int snd_timer_check_slave(struct snd_timer_instance *slave)
+ {
+ struct snd_timer *timer;
+ struct snd_timer_instance *master;
+@@ -190,16 +190,21 @@ static void snd_timer_check_slave(struct
+ list_for_each_entry(master, &timer->open_list_head, open_list) {
+ if (slave->slave_class == master->slave_class &&
+ slave->slave_id == master->slave_id) {
++ if (master->timer->num_instances >=
++ master->timer->max_instances)
++ return -EBUSY;
+ list_move_tail(&slave->open_list,
+ &master->slave_list_head);
++ master->timer->num_instances++;
+ spin_lock_irq(&slave_active_lock);
+ slave->master = master;
+ slave->timer = master->timer;
+ spin_unlock_irq(&slave_active_lock);
+- return;
++ return 0;
+ }
+ }
+ }
++ return 0;
+ }
+
+ /*
+@@ -208,7 +213,7 @@ static void snd_timer_check_slave(struct
+ *
+ * call this with register_mutex down.
+ */
+-static void snd_timer_check_master(struct snd_timer_instance *master)
++static int snd_timer_check_master(struct snd_timer_instance *master)
+ {
+ struct snd_timer_instance *slave, *tmp;
+
+@@ -216,7 +221,11 @@ static void snd_timer_check_master(struc
+ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
+ if (slave->slave_class == master->slave_class &&
+ slave->slave_id == master->slave_id) {
++ if (master->timer->num_instances >=
++ master->timer->max_instances)
++ return -EBUSY;
+ list_move_tail(&slave->open_list, &master->slave_list_head);
++ master->timer->num_instances++;
+ spin_lock_irq(&slave_active_lock);
+ spin_lock(&master->timer->lock);
+ slave->master = master;
+@@ -228,8 +237,11 @@ static void snd_timer_check_master(struc
+ spin_unlock_irq(&slave_active_lock);
+ }
+ }
++ return 0;
+ }
+
++static int snd_timer_close_locked(struct snd_timer_instance *timeri);
++
+ /*
+ * open a timer instance
+ * when opening a master, the slave id must be here given.
+@@ -240,6 +252,7 @@ int snd_timer_open(struct snd_timer_inst
+ {
+ struct snd_timer *timer;
+ struct snd_timer_instance *timeri = NULL;
++ int err;
+
+ if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
+ /* open a slave instance */
+@@ -259,10 +272,14 @@ int snd_timer_open(struct snd_timer_inst
+ timeri->slave_id = tid->device;
+ timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
+ list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+- snd_timer_check_slave(timeri);
++ err = snd_timer_check_slave(timeri);
++ if (err < 0) {
++ snd_timer_close_locked(timeri);
++ timeri = NULL;
++ }
+ mutex_unlock(&register_mutex);
+ *ti = timeri;
+- return 0;
++ return err;
+ }
+
+ /* open a master instance */
+@@ -288,6 +305,10 @@ int snd_timer_open(struct snd_timer_inst
+ return -EBUSY;
+ }
+ }
++ if (timer->num_instances >= timer->max_instances) {
++ mutex_unlock(&register_mutex);
++ return -EBUSY;
++ }
+ timeri = snd_timer_instance_new(owner, timer);
+ if (!timeri) {
+ mutex_unlock(&register_mutex);
+@@ -314,25 +335,27 @@ int snd_timer_open(struct snd_timer_inst
+ }
+
+ list_add_tail(&timeri->open_list, &timer->open_list_head);
+- snd_timer_check_master(timeri);
++ timer->num_instances++;
++ err = snd_timer_check_master(timeri);
++ if (err < 0) {
++ snd_timer_close_locked(timeri);
++ timeri = NULL;
++ }
+ mutex_unlock(&register_mutex);
+ *ti = timeri;
+- return 0;
++ return err;
+ }
+ EXPORT_SYMBOL(snd_timer_open);
+
+ /*
+ * close a timer instance
++ * call this with register_mutex down.
+ */
+-int snd_timer_close(struct snd_timer_instance *timeri)
++static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+ {
+ struct snd_timer *timer = NULL;
+ struct snd_timer_instance *slave, *tmp;
+
+- if (snd_BUG_ON(!timeri))
+- return -ENXIO;
+-
+- mutex_lock(&register_mutex);
+ list_del(&timeri->open_list);
+
+ /* force to stop the timer */
+@@ -340,6 +363,7 @@ int snd_timer_close(struct snd_timer_ins
+
+ timer = timeri->timer;
+ if (timer) {
++ timer->num_instances--;
+ /* wait, until the active callback is finished */
+ spin_lock_irq(&timer->lock);
+ while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
+@@ -355,6 +379,7 @@ int snd_timer_close(struct snd_timer_ins
+ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
+ open_list) {
+ list_move_tail(&slave->open_list, &snd_timer_slave_list);
++ timer->num_instances--;
+ slave->master = NULL;
+ slave->timer = NULL;
+ list_del_init(&slave->ack_list);
+@@ -382,9 +407,24 @@ int snd_timer_close(struct snd_timer_ins
+ module_put(timer->module);
+ }
+
+- mutex_unlock(&register_mutex);
+ return 0;
+ }
++
++/*
++ * close a timer instance
++ */
++int snd_timer_close(struct snd_timer_instance *timeri)
++{
++ int err;
++
++ if (snd_BUG_ON(!timeri))
++ return -ENXIO;
++
++ mutex_lock(&register_mutex);
++ err = snd_timer_close_locked(timeri);
++ mutex_unlock(&register_mutex);
++ return err;
++}
+ EXPORT_SYMBOL(snd_timer_close);
+
+ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
+@@ -855,6 +895,7 @@ int snd_timer_new(struct snd_card *card,
+ spin_lock_init(&timer->lock);
+ tasklet_init(&timer->task_queue, snd_timer_tasklet,
+ (unsigned long)timer);
++ timer->max_instances = 1000; /* default limit per timer */
+ if (card != NULL) {
+ timer->module = card->module;
+ err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
--- /dev/null
+From f5ce817951f38023588b2b8308beca79abe20507 Mon Sep 17 00:00:00 2001
+From: Jussi Laako <jussi@sonarnerd.net>
+Date: Wed, 1 Nov 2017 23:32:33 +0200
+Subject: ALSA: usb-audio: support new Amanero Combo384 firmware version
+
+From: Jussi Laako <jussi@sonarnerd.net>
+
+commit f5ce817951f38023588b2b8308beca79abe20507 upstream.
+
+Support the DSD_U32_BE sample format with the new Amanero Combo384
+firmware version on the older VID/PID.
+
+Fixes: 3eff682d765b ("ALSA: usb-audio: Support both DSD LE/BE Amanero firmware versions")
+Signed-off-by: Jussi Laako <jussi@sonarnerd.net>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/quirks.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(
+ case 0x199:
+ return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+ case 0x19b:
++ case 0x203:
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ default:
+ break;
--- /dev/null
+From b9dd05c7002ee0ca8b676428b2268c26399b5e31 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 2 Nov 2017 18:44:28 +0100
+Subject: ARM: 8720/1: ensure dump_instr() checks addr_limit
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit b9dd05c7002ee0ca8b676428b2268c26399b5e31 upstream.
+
+When CONFIG_DEBUG_USER is enabled, it's possible for a user to
+deliberately trigger dump_instr() with a chosen kernel address.
+
+Let's avoid problems resulting from this by using get_user() rather than
+__get_user(), ensuring that we don't erroneously access kernel memory.
+
+So that we can use the same code to dump user instructions and kernel
+instructions, the common dumping code is factored out to __dump_instr(),
+with the fs manipulated appropriately in dump_instr() around calls to
+this.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/traps.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, co
+ set_fs(fs);
+ }
+
+-static void dump_instr(const char *lvl, struct pt_regs *regs)
++static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ {
+ unsigned long addr = instruction_pointer(regs);
+ const int thumb = thumb_mode(regs);
+ const int width = thumb ? 4 : 8;
+- mm_segment_t fs;
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
+ /*
+- * We need to switch to kernel mode so that we can use __get_user
+- * to safely read from kernel space. Note that we now dump the
+- * code first, just in case the backtrace kills us.
++ * Note that we now dump the code first, just in case the backtrace
++ * kills us.
+ */
+- fs = get_fs();
+- set_fs(KERNEL_DS);
+
+ for (i = -4; i < 1 + !!thumb; i++) {
+ unsigned int val, bad;
+
+ if (thumb)
+- bad = __get_user(val, &((u16 *)addr)[i]);
++ bad = get_user(val, &((u16 *)addr)[i]);
+ else
+- bad = __get_user(val, &((u32 *)addr)[i]);
++ bad = get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl,
+ }
+ }
+ printk("%sCode: %s\n", lvl, str);
++}
+
+- set_fs(fs);
++static void dump_instr(const char *lvl, struct pt_regs *regs)
++{
++ mm_segment_t fs;
++
++ if (!user_mode(regs)) {
++ fs = get_fs();
++ set_fs(KERNEL_DS);
++ __dump_instr(lvl, regs);
++ set_fs(fs);
++ } else {
++ __dump_instr(lvl, regs);
++ }
+ }
+
+ #ifdef CONFIG_ARM_UNWIND
--- /dev/null
+From 441f99c90497e15aa3ad1dbabd56187e29614348 Mon Sep 17 00:00:00 2001
+From: Romain Izard <romain.izard.pro@gmail.com>
+Date: Tue, 31 Oct 2017 15:42:35 +0100
+Subject: crypto: ccm - preserve the IV buffer
+
+From: Romain Izard <romain.izard.pro@gmail.com>
+
+commit 441f99c90497e15aa3ad1dbabd56187e29614348 upstream.
+
+The IV buffer is used twice during CCM operations, in both the
+hashing step and the ciphering step.
+
+When using a hardware accelerator that updates the contents of the IV
+buffer at the end of ciphering operations, the value will be modified.
+In the decryption case, the subsequent setup of the hashing algorithm
+will interpret the updated IV instead of the original value, which can
+lead to out-of-bounds writes.
+
+Reuse the idata buffer, only used in the hashing step, to preserve the
+IV's value during the ciphering step in the decryption case.
+
+Signed-off-by: Romain Izard <romain.izard.pro@gmail.com>
+Reviewed-by: Tudor Ambarus <tudor.ambarus@microchip.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ccm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aea
+ unsigned int cryptlen = req->cryptlen;
+ u8 *authtag = pctx->auth_tag;
+ u8 *odata = pctx->odata;
+- u8 *iv = req->iv;
++ u8 *iv = pctx->idata;
+ int err;
+
+ cryptlen -= authsize;
+@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aea
+ if (req->src != req->dst)
+ dst = pctx->dst;
+
++ memcpy(iv, req->iv, 16);
++
+ skcipher_request_set_tfm(skreq, ctx->ctr);
+ skcipher_request_set_callback(skreq, pctx->flags,
+ crypto_ccm_decrypt_done, req);
--- /dev/null
+From d041b557792c85677f17e08eee535eafbd6b9aa2 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Mon, 16 Oct 2017 18:51:31 +0300
+Subject: crypto: x86/sha1-mb - fix panic due to unaligned access
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit d041b557792c85677f17e08eee535eafbd6b9aa2 upstream.
+
+struct sha1_ctx_mgr is allocated in sha1_mb_mod_init() via kzalloc()
+and later passed to the sha1_mb_flusher_mgr_flush_avx2() function,
+where vmovdqa instructions are used to access the struct. vmovdqa
+requires a 16-byte aligned argument, but nothing guarantees that
+struct sha1_ctx_mgr will have that alignment. An unaligned vmovdqa
+will generate a GP fault.
+
+Fix this by replacing vmovdqa with vmovdqu which doesn't have alignment
+requirements.
+
+Fixes: 2249cbb53ead ("crypto: sha-mb - SHA1 multibuffer submit and flush routines for AVX2")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+@@ -157,8 +157,8 @@ LABEL skip_ %I
+ .endr
+
+ # Find min length
+- vmovdqa _lens+0*16(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens+0*16(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+@@ -178,8 +178,8 @@ LABEL skip_ %I
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+- vmovdqa %xmm0, _lens+0*16(state)
+- vmovdqa %xmm1, _lens+1*16(state)
++ vmovdqu %xmm0, _lens+0*16(state)
++ vmovdqu %xmm1, _lens+1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+ jc .return_null
+
+ # Find min length
+- vmovdqa _lens(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
--- /dev/null
+From 5dfeaac15f2b1abb5a53c9146041c7235eb9aa04 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Mon, 16 Oct 2017 18:51:30 +0300
+Subject: crypto: x86/sha256-mb - fix panic due to unaligned access
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 5dfeaac15f2b1abb5a53c9146041c7235eb9aa04 upstream.
+
+struct sha256_ctx_mgr is allocated in sha256_mb_mod_init() via kzalloc()
+and later passed to the sha256_mb_flusher_mgr_flush_avx2() function,
+where vmovdqa instructions are used to access the struct. vmovdqa
+requires a 16-byte aligned argument, but nothing guarantees that
+struct sha256_ctx_mgr will have that alignment. An unaligned vmovdqa
+will generate a GP fault.
+
+Fix this by replacing vmovdqa with vmovdqu which doesn't have alignment
+requirements.
+
+Fixes: a377c6b1876e ("crypto: sha256-mb - submit/flush routines for AVX2")
+Reported-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Tim Chen
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -155,8 +155,8 @@ LABEL skip_ %I
+ .endr
+
+ # Find min length
+- vmovdqa _lens+0*16(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens+0*16(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
+@@ -176,8 +176,8 @@ LABEL skip_ %I
+ vpsubd %xmm2, %xmm0, %xmm0
+ vpsubd %xmm2, %xmm1, %xmm1
+
+- vmovdqa %xmm0, _lens+0*16(state)
+- vmovdqa %xmm1, _lens+1*16(state)
++ vmovdqu %xmm0, _lens+0*16(state)
++ vmovdqu %xmm1, _lens+1*16(state)
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+ jc .return_null
+
+ # Find min length
+- vmovdqa _lens(state), %xmm0
+- vmovdqa _lens+1*16(state), %xmm1
++ vmovdqu _lens(state), %xmm0
++ vmovdqu _lens+1*16(state), %xmm1
+
+ vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
+ vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
--- /dev/null
+From 624f5ab8720b3371367327a822c267699c1823b8 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 7 Nov 2017 22:29:02 +0000
+Subject: KEYS: fix NULL pointer dereference during ASN.1 parsing [ver #2]
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 624f5ab8720b3371367327a822c267699c1823b8 upstream.
+
+syzkaller reported a NULL pointer dereference in asn1_ber_decoder(). It
+can be reproduced by the following command, assuming
+CONFIG_PKCS7_TEST_KEY=y:
+
+ keyctl add pkcs7_test desc '' @s
+
+The bug is that if the data buffer is empty, an integer underflow occurs
+in the following check:
+
+ if (unlikely(dp >= datalen - 1))
+ goto data_overrun_error;
+
+This results in the NULL data pointer being dereferenced.
+
+Fix it by checking for 'datalen - dp < 2' instead.
+
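+As a minimal sketch of the unsigned-arithmetic pitfall (assuming size_t
+operands as in the decoder, with dp <= datalen at this point; the helper
+names below are purely illustrative):
+
+	#include <stddef.h>
+	#include <stdbool.h>
+
+	/* Old check: with datalen == 0, datalen - 1 wraps to SIZE_MAX,
+	 * so no overrun is reported and data[dp] gets dereferenced. */
+	static bool old_check_overrun(size_t dp, size_t datalen)
+	{
+		return dp >= datalen - 1;
+	}
+
+	/* New check: the subtraction cannot wrap while dp <= datalen,
+	 * so an empty buffer (datalen == 0) is correctly rejected. */
+	static bool new_check_overrun(size_t dp, size_t datalen)
+	{
+		return datalen - dp < 2;
+	}
+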
+Also fix the similar check for 'dp >= datalen - n' later in the same
+function. That one possibly could result in a buffer overread.
+
+The NULL pointer dereference was reproducible using the "pkcs7_test" key
+type but not the "asymmetric" key type because the "asymmetric" key type
+checks for a 0-length payload before calling into the ASN.1 decoder but
+the "pkcs7_test" key type does not.
+
+The bug report was:
+
+ BUG: unable to handle kernel NULL pointer dereference at (null)
+ IP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233
+ PGD 7b708067 P4D 7b708067 PUD 7b6ee067 PMD 0
+ Oops: 0000 [#1] SMP
+ Modules linked in:
+ CPU: 0 PID: 522 Comm: syz-executor1 Not tainted 4.14.0-rc8 #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.3-20171021_125229-anatol 04/01/2014
+ task: ffff9b6b3798c040 task.stack: ffff9b6b37970000
+ RIP: 0010:asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233
+ RSP: 0018:ffff9b6b37973c78 EFLAGS: 00010216
+ RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000000000021c
+ RDX: ffffffff814a04ed RSI: ffffb1524066e000 RDI: ffffffff910759e0
+ RBP: ffff9b6b37973d60 R08: 0000000000000001 R09: ffff9b6b3caa4180
+ R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000002
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ FS: 00007f10ed1f2700(0000) GS:ffff9b6b3ea00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000000 CR3: 000000007b6f3000 CR4: 00000000000006f0
+ Call Trace:
+ pkcs7_parse_message+0xee/0x240 crypto/asymmetric_keys/pkcs7_parser.c:139
+ verify_pkcs7_signature+0x33/0x180 certs/system_keyring.c:216
+ pkcs7_preparse+0x41/0x70 crypto/asymmetric_keys/pkcs7_key_type.c:63
+ key_create_or_update+0x180/0x530 security/keys/key.c:855
+ SYSC_add_key security/keys/keyctl.c:122 [inline]
+ SyS_add_key+0xbf/0x250 security/keys/keyctl.c:62
+ entry_SYSCALL_64_fastpath+0x1f/0xbe
+ RIP: 0033:0x4585c9
+ RSP: 002b:00007f10ed1f1bd8 EFLAGS: 00000216 ORIG_RAX: 00000000000000f8
+ RAX: ffffffffffffffda RBX: 00007f10ed1f2700 RCX: 00000000004585c9
+ RDX: 0000000020000000 RSI: 0000000020008ffb RDI: 0000000020008000
+ RBP: 0000000000000000 R08: ffffffffffffffff R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000216 R12: 00007fff1b2260ae
+ R13: 00007fff1b2260af R14: 00007f10ed1f2700 R15: 0000000000000000
+ Code: dd ca ff 48 8b 45 88 48 83 e8 01 4c 39 f0 0f 86 a8 07 00 00 e8 53 dd ca ff 49 8d 46 01 48 89 85 58 ff ff ff 48 8b 85 60 ff ff ff <42> 0f b6 0c 30 89 c8 88 8d 75 ff ff ff 83 e0 1f 89 8d 28 ff ff
+ RIP: asn1_ber_decoder+0x17f/0xe60 lib/asn1_decoder.c:233 RSP: ffff9b6b37973c78
+ CR2: 0000000000000000
+
+Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/asn1_decoder.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -228,7 +228,7 @@ next_op:
+ hdr = 2;
+
+ /* Extract a tag from the data */
+- if (unlikely(dp >= datalen - 1))
++ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ tag = data[dp++];
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
+@@ -274,7 +274,7 @@ next_op:
+ int n = len - 0x80;
+ if (unlikely(n > 2))
+ goto length_too_long;
+- if (unlikely(dp >= datalen - n))
++ if (unlikely(n > datalen - dp))
+ goto data_overrun_error;
+ hdr += n;
+ for (len = 0; n > 0; n--) {
--- /dev/null
+From 6a6cba1d945a7511cdfaf338526871195e420762 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 31 Oct 2017 15:09:22 -0700
+Subject: MIPS: Fix CM region target definitions
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit 6a6cba1d945a7511cdfaf338526871195e420762 upstream.
+
+The default CM target field in the GCR_BASE register is encoded with 0
+meaning memory & 1 being reserved. However, the definitions we use for
+those bits effectively get these two values backwards - likely because
+they were copied from the definitions for the CM regions, where the
+target is encoded differently. This results in us setting up GCR_BASE
+with the reserved target value by default, rather than targeting memory
+as intended. Although we currently seem to get away with this, it's not
+a great idea to rely upon it.
+
+Fix this by changing our macros to match the documented target values.
+
+The incorrect encoding has been in use since commit 9f98f3dd0c51 ("MIPS: Add
+generic CM probe & access code") in the Linux v3.15 cycle, and was
+likely carried forwards from older but unused code introduced by
+commit 39b8d5254246 ("[MIPS] Add support for MIPS CMP platform.") in the
+v2.6.26 cycle.
+
+Fixes: 9f98f3dd0c51 ("MIPS: Add generic CM probe & access code")
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Reported-by: Matt Redfearn <matt.redfearn@mips.com>
+Reviewed-by: James Hogan <jhogan@kernel.org>
+Cc: Matt Redfearn <matt.redfearn@mips.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # v3.15+
+Patchwork: https://patchwork.linux-mips.org/patch/17562/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+[jhogan@kernel.org: Backported 3.15..4.13]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/mips-cm.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -240,8 +240,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
+ #define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
+ #define CM_GCR_BASE_CMDEFTGT_SHF 0
+ #define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
+-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
+-#define CM_GCR_BASE_CMDEFTGT_MEM 1
++#define CM_GCR_BASE_CMDEFTGT_MEM 0
++#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
+ #define CM_GCR_BASE_CMDEFTGT_IOCU0 2
+ #define CM_GCR_BASE_CMDEFTGT_IOCU1 3
+
netfilter-nat-revert-netfilter-nat-convert-nat-bysrc-hash-to-rhashtable.patch
netfilter-nft_set_hash-disable-fast_ops-for-2-len-keys.patch
+workqueue-fix-null-pointer-dereference.patch
+crypto-ccm-preserve-the-iv-buffer.patch
+crypto-x86-sha1-mb-fix-panic-due-to-unaligned-access.patch
+crypto-x86-sha256-mb-fix-panic-due-to-unaligned-access.patch
+keys-fix-null-pointer-dereference-during-asn.1-parsing.patch
+acpi-pm-blacklist-low-power-s0-idle-_dsm-for-dell-xps13-9360.patch
+acpica-dispatch-active-gpes-at-init-time.patch
+acpica-make-it-possible-to-enable-runtime-gpes-earlier.patch
+acpi-scan-enable-gpes-before-scanning-the-namespace.patch
+arm-8720-1-ensure-dump_instr-checks-addr_limit.patch
+alsa-timer-limit-max-instances-per-timer.patch
+alsa-usb-audio-support-new-amanero-combo384-firmware-version.patch
+alsa-hda-fix-headset-mic-problem-for-dell-machines-with-alc274.patch
+alsa-seq-fix-oss-sysex-delivery-in-oss-emulation.patch
+alsa-seq-avoid-invalid-lockdep-class-warning.patch
+mips-fix-cm-region-target-definitions.patch
--- /dev/null
+From cef572ad9bd7f85035ba8272e5352040e8be0152 Mon Sep 17 00:00:00 2001
+From: Li Bin <huawei.libin@huawei.com>
+Date: Sat, 28 Oct 2017 11:07:28 +0800
+Subject: workqueue: Fix NULL pointer dereference
+
+From: Li Bin <huawei.libin@huawei.com>
+
+commit cef572ad9bd7f85035ba8272e5352040e8be0152 upstream.
+
+When queue_work() is used in irq context (not in task context), there
+is a potential case that triggers a NULL pointer dereference.
+----------------------------------------------------------------
+worker_thread()
+|-spin_lock_irq()
+|-process_one_work()
+ |-worker->current_pwq = pwq
+ |-spin_unlock_irq()
+ |-worker->current_func(work)
+ |-spin_lock_irq()
+ |-worker->current_pwq = NULL
+|-spin_unlock_irq()
+
+ //interrupt here
+ |-irq_handler
+ |-__queue_work()
+ //assuming that the wq is draining
+ |-is_chained_work(wq)
+ |-current_wq_worker()
+ //Here, 'current' is the interrupted worker!
+ |-current->current_pwq is NULL here!
+|-schedule()
+----------------------------------------------------------------
+
+Avoid it by checking for task context in current_wq_worker(): when
+not in task context, 'current' shouldn't be used to check the
+condition.
+
+Reported-by: Xiaofei Tan <tanxiaofei@huawei.com>
+Signed-off-by: Li Bin <huawei.libin@huawei.com>
+Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: 8d03ecfe4718 ("workqueue: reimplement is_chained_work() using current_wq_worker()")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue_internal.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -9,6 +9,7 @@
+
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
++#include <linux/preempt.h>
+
+ struct worker_pool;
+
+@@ -59,7 +60,7 @@ struct worker {
+ */
+ static inline struct worker *current_wq_worker(void)
+ {
+- if (current->flags & PF_WQ_WORKER)
++ if (in_task() && (current->flags & PF_WQ_WORKER))
+ return kthread_data(current);
+ return NULL;
+ }