--- /dev/null
+From ddfd9dcf270ce23ed1985b66fcfa163920e2e1b8 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Fri, 3 Apr 2020 17:48:33 +0200
+Subject: ACPI: PM: Add acpi_[un]register_wakeup_handler()
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit ddfd9dcf270ce23ed1985b66fcfa163920e2e1b8 upstream.
+
+Since commit fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from
+waking up the system") the SCI triggering without there being a wakeup
+cause recognized by the ACPI sleep code will no longer wakeup the system.
+
+This works as intended, but this is a problem for devices where the SCI
+is shared with another device which is also a wakeup source.
+
+In the past these, from the pov of the ACPI sleep code, spurious SCIs
+would still cause a wakeup so the wakeup from the device sharing the
+interrupt would actually wakeup the system. This now no longer works.
+
+This is a problem on e.g. Bay Trail-T and Cherry Trail devices where
+some peripherals (typically the XHCI controller) can signal a
+Power Management Event (PME) to the Power Management Controller (PMC)
+to wakeup the system, this uses the same interrupt as the SCI.
+These wakeups are handled through a special INT0002 ACPI device which
+checks for events in the GPE0a_STS for this and takes care of acking
+the PME so that the shared interrupt stops triggering.
+
+The change to the ACPI sleep code to ignore the spurious SCI, causes
+the system to no longer wakeup on these PME events. To make things
+worse this means that the INT0002 device driver interrupt handler will
+no longer run, causing the PME to not get cleared and resulting in the
+system hanging. Trying to wakeup the system after such a PME through e.g.
+the power button no longer works.
+
+Add an acpi_register_wakeup_handler() function which registers
+a handler to be called from acpi_s2idle_wake() and when the handler
+returns true, return true from acpi_s2idle_wake().
+
+The INT0002 driver will use this mechanism to check the GPE0a_STS
+register from acpi_s2idle_wake() and to tell the system to wakeup
+if a PME is signaled in the register.
+
+Fixes: fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system")
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/sleep.c | 4 ++
+ drivers/acpi/sleep.h | 1
+ drivers/acpi/wakeup.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/acpi.h | 5 +++
+ 4 files changed, 91 insertions(+)
+
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void)
+ if (acpi_any_fixed_event_status_set())
+ return true;
+
++ /* Check wakeups from drivers sharing the SCI. */
++ if (acpi_check_wakeup_handlers())
++ return true;
++
+ /*
+ * If there are no EC events to process and at least one of the
+ * other enabled GPEs is active, the wakeup is regarded as a
+--- a/drivers/acpi/sleep.h
++++ b/drivers/acpi/sleep.h
+@@ -2,6 +2,7 @@
+
+ extern void acpi_enable_wakeup_devices(u8 sleep_state);
+ extern void acpi_disable_wakeup_devices(u8 sleep_state);
++extern bool acpi_check_wakeup_handlers(void);
+
+ extern struct list_head acpi_wakeup_device_list;
+ extern struct mutex acpi_device_lock;
+--- a/drivers/acpi/wakeup.c
++++ b/drivers/acpi/wakeup.c
+@@ -12,6 +12,15 @@
+ #include "internal.h"
+ #include "sleep.h"
+
++struct acpi_wakeup_handler {
++ struct list_head list_node;
++ bool (*wakeup)(void *context);
++ void *context;
++};
++
++static LIST_HEAD(acpi_wakeup_handler_head);
++static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
++
+ /*
+ * We didn't lock acpi_device_lock in the file, because it invokes oops in
+ * suspend/resume and isn't really required as this is called in S-state. At
+@@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void)
+ mutex_unlock(&acpi_device_lock);
+ return 0;
+ }
++
++/**
++ * acpi_register_wakeup_handler - Register wakeup handler
++ * @wake_irq: The IRQ through which the device may receive wakeups
++ * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
++ * @context: Context to pass to the handler when calling it
++ *
++ * Drivers which may share an IRQ with the SCI can use this to register
++ * a handler which returns true when the device they are managing wants
++ * to trigger a wakeup.
++ */
++int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
++ void *context)
++{
++ struct acpi_wakeup_handler *handler;
++
++ /*
++ * If the device is not sharing its IRQ with the SCI, there is no
++ * need to register the handler.
++ */
++ if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
++ return 0;
++
++ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
++ if (!handler)
++ return -ENOMEM;
++
++ handler->wakeup = wakeup;
++ handler->context = context;
++
++ mutex_lock(&acpi_wakeup_handler_mutex);
++ list_add(&handler->list_node, &acpi_wakeup_handler_head);
++ mutex_unlock(&acpi_wakeup_handler_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
++
++/**
++ * acpi_unregister_wakeup_handler - Unregister wakeup handler
++ * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
++ * @context: Context passed to acpi_register_wakeup_handler()
++ */
++void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
++ void *context)
++{
++ struct acpi_wakeup_handler *handler;
++
++ mutex_lock(&acpi_wakeup_handler_mutex);
++ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
++ if (handler->wakeup == wakeup && handler->context == context) {
++ list_del(&handler->list_node);
++ kfree(handler);
++ break;
++ }
++ }
++ mutex_unlock(&acpi_wakeup_handler_mutex);
++}
++EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
++
++bool acpi_check_wakeup_handlers(void)
++{
++ struct acpi_wakeup_handler *handler;
++
++ /* No need to lock, nothing else is running when we're called. */
++ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
++ if (handler->wakeup(handler->context))
++ return true;
++ }
++
++ return false;
++}
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -473,6 +473,11 @@ void __init acpi_nvs_nosave_s3(void);
+ void __init acpi_sleep_no_blacklist(void);
+ #endif /* CONFIG_PM_SLEEP */
+
++int acpi_register_wakeup_handler(
++ int wake_irq, bool (*wakeup)(void *context), void *context);
++void acpi_unregister_wakeup_handler(
++ bool (*wakeup)(void *context), void *context);
++
+ struct acpi_osc_context {
+ char *uuid_str; /* UUID string */
+ int rev;
--- /dev/null
+From 4562fa4c86c92a2df635fe0697c9e06379738741 Mon Sep 17 00:00:00 2001
+From: Anson Huang <Anson.Huang@nxp.com>
+Date: Wed, 11 Dec 2019 10:53:36 +0800
+Subject: ARM: imx: Enable ARM_ERRATA_814220 for i.MX6UL and i.MX7D
+
+From: Anson Huang <Anson.Huang@nxp.com>
+
+commit 4562fa4c86c92a2df635fe0697c9e06379738741 upstream.
+
+ARM_ERRATA_814220 has below description:
+
+The v7 ARM states that all cache and branch predictor maintenance
+operations that do not specify an address execute, relative to
+each other, in program order.
+However, because of this erratum, an L2 set/way cache maintenance
+operation can overtake an L1 set/way cache maintenance operation.
+This erratum only affects the Cortex-A7 and is present in r0p2, r0p3,
+r0p4, r0p5.
+
+i.MX6UL and i.MX7D have Cortex-A7 r0p5 inside, need to enable
+ARM_ERRATA_814220 for proper workaround.
+
+Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Cc: Christian Eggers <ceggers@arri.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-imx/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/mach-imx/Kconfig
++++ b/arch/arm/mach-imx/Kconfig
+@@ -520,6 +520,7 @@ config SOC_IMX6UL
+ bool "i.MX6 UltraLite support"
+ select PINCTRL_IMX6UL
+ select SOC_IMX6
++ select ARM_ERRATA_814220
+
+ help
+ This enables support for Freescale i.MX6 UltraLite processor.
+@@ -556,6 +557,7 @@ config SOC_IMX7D
+ select PINCTRL_IMX7D
+ select SOC_IMX7D_CA7 if ARCH_MULTI_V7
+ select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
++ select ARM_ERRATA_814220
+ help
+ This enables support for Freescale i.MX7 Dual processor.
+
--- /dev/null
+From c74067a0f776c1d695a713a4388c3b6a094ee40a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 7 Jan 2020 22:51:39 +0100
+Subject: ARM: imx: only select ARM_ERRATA_814220 for ARMv7-A
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit c74067a0f776c1d695a713a4388c3b6a094ee40a upstream.
+
+i.MX7D is supported for either the v7-A or the v7-M cores,
+but the latter causes a warning:
+
+WARNING: unmet direct dependencies detected for ARM_ERRATA_814220
+ Depends on [n]: CPU_V7 [=n]
+ Selected by [y]:
+ - SOC_IMX7D [=y] && ARCH_MXC [=y] && (ARCH_MULTI_V7 [=n] || ARM_SINGLE_ARMV7M [=y])
+
+Make the select statement conditional.
+
+Fixes: 4562fa4c86c9 ("ARM: imx: Enable ARM_ERRATA_814220 for i.MX6UL and i.MX7D")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Cc: Christian Eggers <ceggers@arri.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-imx/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-imx/Kconfig
++++ b/arch/arm/mach-imx/Kconfig
+@@ -557,7 +557,7 @@ config SOC_IMX7D
+ select PINCTRL_IMX7D
+ select SOC_IMX7D_CA7 if ARCH_MULTI_V7
+ select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
+- select ARM_ERRATA_814220
++ select ARM_ERRATA_814220 if ARCH_MULTI_V7
+ help
+ This enables support for Freescale i.MX7 Dual processor.
+
--- /dev/null
+From 9401d5aa328e64617d87abd59af1c91cace4c3e4 Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Fri, 6 Mar 2020 23:29:27 +0100
+Subject: ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit 9401d5aa328e64617d87abd59af1c91cace4c3e4 upstream.
+
+The 4-bit divider value was written at offset 8, while the jz4740
+programming manual locates it at offset 0.
+
+Fixes: 26b0aad80a86 ("ASoC: jz4740: Add dynamic sampling rate support to jz4740-i2s")
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200306222931.39664-2-paul@crapouillou.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/jz4740/jz4740-i2s.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/jz4740/jz4740-i2s.c
++++ b/sound/soc/jz4740/jz4740-i2s.c
+@@ -83,7 +83,7 @@
+ #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
+
+ #define JZ_AIC_CLK_DIV_MASK 0xf
+-#define I2SDIV_DV_SHIFT 8
++#define I2SDIV_DV_SHIFT 0
+ #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+ #define I2SDIV_IDV_SHIFT 8
+ #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
--- /dev/null
+From b27a939e8376a3f1ed09b9c33ef44d20f18ec3d0 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 10 Feb 2020 22:51:08 +0100
+Subject: ceph: canonicalize server path in place
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit b27a939e8376a3f1ed09b9c33ef44d20f18ec3d0 upstream.
+
+syzbot reported that 4fbc0c711b24 ("ceph: remove the extra slashes in
+the server path") had caused a regression where an allocation could be
+done under a spinlock -- compare_mount_options() is called by sget_fc()
+with sb_lock held.
+
+We don't really need the supplied server path, so canonicalize it
+in place and compare it directly. To make this work, the leading
+slash is kept around and the logic in ceph_real_mount() to skip it
+is restored. CEPH_MSG_CLIENT_SESSION now reports the same (i.e.
+canonicalized) path, with the leading slash of course.
+
+Fixes: 4fbc0c711b24 ("ceph: remove the extra slashes in the server path")
+Reported-by: syzbot+98704a51af8e3d9425a9@syzkaller.appspotmail.com
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Luis Henriques <lhenriques@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/super.c | 118 ++++++++++++--------------------------------------------
+ fs/ceph/super.h | 2
+ 2 files changed, 28 insertions(+), 92 deletions(-)
+
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -214,6 +214,26 @@ static match_table_t fsopt_tokens = {
+ {-1, NULL}
+ };
+
++/*
++ * Remove adjacent slashes and then the trailing slash, unless it is
++ * the only remaining character.
++ *
++ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
++ */
++static void canonicalize_path(char *path)
++{
++ int i, j = 0;
++
++ for (i = 0; path[i] != '\0'; i++) {
++ if (path[i] != '/' || j < 1 || path[j - 1] != '/')
++ path[j++] = path[i];
++ }
++
++ if (j > 1 && path[j - 1] == '/')
++ j--;
++ path[j] = '\0';
++}
++
+ static int parse_fsopt_token(char *c, void *private)
+ {
+ struct ceph_mount_options *fsopt = private;
+@@ -429,73 +449,6 @@ static int strcmp_null(const char *s1, c
+ return strcmp(s1, s2);
+ }
+
+-/**
+- * path_remove_extra_slash - Remove the extra slashes in the server path
+- * @server_path: the server path and could be NULL
+- *
+- * Return NULL if the path is NULL or only consists of "/", or a string
+- * without any extra slashes including the leading slash(es) and the
+- * slash(es) at the end of the server path, such as:
+- * "//dir1////dir2///" --> "dir1/dir2"
+- */
+-static char *path_remove_extra_slash(const char *server_path)
+-{
+- const char *path = server_path;
+- const char *cur, *end;
+- char *buf, *p;
+- int len;
+-
+- /* if the server path is omitted */
+- if (!path)
+- return NULL;
+-
+- /* remove all the leading slashes */
+- while (*path == '/')
+- path++;
+-
+- /* if the server path only consists of slashes */
+- if (*path == '\0')
+- return NULL;
+-
+- len = strlen(path);
+-
+- buf = kmalloc(len + 1, GFP_KERNEL);
+- if (!buf)
+- return ERR_PTR(-ENOMEM);
+-
+- end = path + len;
+- p = buf;
+- do {
+- cur = strchr(path, '/');
+- if (!cur)
+- cur = end;
+-
+- len = cur - path;
+-
+- /* including one '/' */
+- if (cur != end)
+- len += 1;
+-
+- memcpy(p, path, len);
+- p += len;
+-
+- while (cur <= end && *cur == '/')
+- cur++;
+- path = cur;
+- } while (path < end);
+-
+- *p = '\0';
+-
+- /*
+- * remove the last slash if there has and just to make sure that
+- * we will get something like "dir1/dir2"
+- */
+- if (*(--p) == '/')
+- *p = '\0';
+-
+- return buf;
+-}
+-
+ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+ struct ceph_options *new_opt,
+ struct ceph_fs_client *fsc)
+@@ -503,7 +456,6 @@ static int compare_mount_options(struct
+ struct ceph_mount_options *fsopt1 = new_fsopt;
+ struct ceph_mount_options *fsopt2 = fsc->mount_options;
+ int ofs = offsetof(struct ceph_mount_options, snapdir_name);
+- char *p1, *p2;
+ int ret;
+
+ ret = memcmp(fsopt1, fsopt2, ofs);
+@@ -513,21 +465,12 @@ static int compare_mount_options(struct
+ ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
+ if (ret)
+ return ret;
++
+ ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
+ if (ret)
+ return ret;
+
+- p1 = path_remove_extra_slash(fsopt1->server_path);
+- if (IS_ERR(p1))
+- return PTR_ERR(p1);
+- p2 = path_remove_extra_slash(fsopt2->server_path);
+- if (IS_ERR(p2)) {
+- kfree(p1);
+- return PTR_ERR(p2);
+- }
+- ret = strcmp_null(p1, p2);
+- kfree(p1);
+- kfree(p2);
++ ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+ if (ret)
+ return ret;
+
+@@ -595,6 +538,8 @@ static int parse_mount_options(struct ce
+ err = -ENOMEM;
+ goto out;
+ }
++
++ canonicalize_path(fsopt->server_path);
+ } else {
+ dev_name_end = dev_name + strlen(dev_name);
+ }
+@@ -1022,7 +967,9 @@ static struct dentry *ceph_real_mount(st
+ mutex_lock(&fsc->client->mount_mutex);
+
+ if (!fsc->sb->s_root) {
+- const char *path, *p;
++ const char *path = fsc->mount_options->server_path ?
++ fsc->mount_options->server_path + 1 : "";
++
+ err = __ceph_open_session(fsc->client, started);
+ if (err < 0)
+ goto out;
+@@ -1034,22 +981,11 @@ static struct dentry *ceph_real_mount(st
+ goto out;
+ }
+
+- p = path_remove_extra_slash(fsc->mount_options->server_path);
+- if (IS_ERR(p)) {
+- err = PTR_ERR(p);
+- goto out;
+- }
+- /* if the server path is omitted or just consists of '/' */
+- if (!p)
+- path = "";
+- else
+- path = p;
+ dout("mount opening path '%s'\n", path);
+
+ ceph_fs_debugfs_init(fsc);
+
+ root = open_root_dentry(fsc, path, started);
+- kfree(p);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto out;
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -92,7 +92,7 @@ struct ceph_mount_options {
+
+ char *snapdir_name; /* default ".snap" */
+ char *mds_namespace; /* default NULL */
+- char *server_path; /* default "/" */
++ char *server_path; /* default NULL (means "/") */
+ char *fscache_uniq; /* default NULL */
+ };
+
--- /dev/null
+From 4fbc0c711b2464ee1551850b85002faae0b775d5 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Fri, 20 Dec 2019 09:34:04 -0500
+Subject: ceph: remove the extra slashes in the server path
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 4fbc0c711b2464ee1551850b85002faae0b775d5 upstream.
+
+It's possible to pass the mount helper a server path that has more
+than one contiguous slash character. For example:
+
+ $ mount -t ceph 192.168.195.165:40176:/// /mnt/cephfs/
+
+In the MDS server side the extra slashes of the server path will be
+treated as snap dir, and then we can get the following debug logs:
+
+ ceph: mount opening path //
+ ceph: open_root_inode opening '//'
+ ceph: fill_trace 0000000059b8a3bc is_dentry 0 is_target 1
+ ceph: alloc_inode 00000000dc4ca00b
+ ceph: get_inode created new inode 00000000dc4ca00b 1.ffffffffffffffff ino 1
+ ceph: get_inode on 1=1.ffffffffffffffff got 00000000dc4ca00b
+
+And then when creating any new file or directory under the mount
+point, we can hit the following BUG_ON in ceph_fill_trace():
+
+ BUG_ON(ceph_snap(dir) != dvino.snap);
+
+Have the client ignore the extra slashes in the server path when
+mounting. This will also canonicalize the path, so that identical mounts
+can be consolidated.
+
+1) "//mydir1///mydir//"
+2) "/mydir1/mydir"
+3) "/mydir1/mydir/"
+
+Regardless of the internal treatment of these paths, the kernel still
+stores the original string including the leading '/' for presentation
+to userland.
+
+URL: https://tracker.ceph.com/issues/42771
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Luis Henriques <lhenriques@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/super.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 101 insertions(+), 19 deletions(-)
+
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -106,7 +106,6 @@ static int ceph_statfs(struct dentry *de
+ return 0;
+ }
+
+-
+ static int ceph_sync_fs(struct super_block *sb, int wait)
+ {
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+@@ -430,6 +429,73 @@ static int strcmp_null(const char *s1, c
+ return strcmp(s1, s2);
+ }
+
++/**
++ * path_remove_extra_slash - Remove the extra slashes in the server path
++ * @server_path: the server path and could be NULL
++ *
++ * Return NULL if the path is NULL or only consists of "/", or a string
++ * without any extra slashes including the leading slash(es) and the
++ * slash(es) at the end of the server path, such as:
++ * "//dir1////dir2///" --> "dir1/dir2"
++ */
++static char *path_remove_extra_slash(const char *server_path)
++{
++ const char *path = server_path;
++ const char *cur, *end;
++ char *buf, *p;
++ int len;
++
++ /* if the server path is omitted */
++ if (!path)
++ return NULL;
++
++ /* remove all the leading slashes */
++ while (*path == '/')
++ path++;
++
++ /* if the server path only consists of slashes */
++ if (*path == '\0')
++ return NULL;
++
++ len = strlen(path);
++
++ buf = kmalloc(len + 1, GFP_KERNEL);
++ if (!buf)
++ return ERR_PTR(-ENOMEM);
++
++ end = path + len;
++ p = buf;
++ do {
++ cur = strchr(path, '/');
++ if (!cur)
++ cur = end;
++
++ len = cur - path;
++
++ /* including one '/' */
++ if (cur != end)
++ len += 1;
++
++ memcpy(p, path, len);
++ p += len;
++
++ while (cur <= end && *cur == '/')
++ cur++;
++ path = cur;
++ } while (path < end);
++
++ *p = '\0';
++
++ /*
++ * remove the last slash if there has and just to make sure that
++ * we will get something like "dir1/dir2"
++ */
++ if (*(--p) == '/')
++ *p = '\0';
++
++ return buf;
++}
++
+ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+ struct ceph_options *new_opt,
+ struct ceph_fs_client *fsc)
+@@ -437,6 +503,7 @@ static int compare_mount_options(struct
+ struct ceph_mount_options *fsopt1 = new_fsopt;
+ struct ceph_mount_options *fsopt2 = fsc->mount_options;
+ int ofs = offsetof(struct ceph_mount_options, snapdir_name);
++ char *p1, *p2;
+ int ret;
+
+ ret = memcmp(fsopt1, fsopt2, ofs);
+@@ -449,9 +516,21 @@ static int compare_mount_options(struct
+ ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
+ if (ret)
+ return ret;
+- ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
++
++ p1 = path_remove_extra_slash(fsopt1->server_path);
++ if (IS_ERR(p1))
++ return PTR_ERR(p1);
++ p2 = path_remove_extra_slash(fsopt2->server_path);
++ if (IS_ERR(p2)) {
++ kfree(p1);
++ return PTR_ERR(p2);
++ }
++ ret = strcmp_null(p1, p2);
++ kfree(p1);
++ kfree(p2);
+ if (ret)
+ return ret;
++
+ ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
+ if (ret)
+ return ret;
+@@ -507,12 +586,14 @@ static int parse_mount_options(struct ce
+ */
+ dev_name_end = strchr(dev_name, '/');
+ if (dev_name_end) {
+- if (strlen(dev_name_end) > 1) {
+- fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+- if (!fsopt->server_path) {
+- err = -ENOMEM;
+- goto out;
+- }
++ /*
++ * The server_path will include the whole chars from userland
++ * including the leading '/'.
++ */
++ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
++ if (!fsopt->server_path) {
++ err = -ENOMEM;
++ goto out;
+ }
+ } else {
+ dev_name_end = dev_name + strlen(dev_name);
+@@ -842,7 +923,6 @@ static void destroy_caches(void)
+ ceph_fscache_unregister();
+ }
+
+-
+ /*
+ * ceph_umount_begin - initiate forced umount. Tear down down the
+ * mount, skipping steps that may hang while waiting for server(s).
+@@ -929,9 +1009,6 @@ out:
+ return root;
+ }
+
+-
+-
+-
+ /*
+ * mount: join the ceph cluster, and open root directory.
+ */
+@@ -945,7 +1022,7 @@ static struct dentry *ceph_real_mount(st
+ mutex_lock(&fsc->client->mount_mutex);
+
+ if (!fsc->sb->s_root) {
+- const char *path;
++ const char *path, *p;
+ err = __ceph_open_session(fsc->client, started);
+ if (err < 0)
+ goto out;
+@@ -957,17 +1034,22 @@ static struct dentry *ceph_real_mount(st
+ goto out;
+ }
+
+- if (!fsc->mount_options->server_path) {
+- path = "";
+- dout("mount opening path \\t\n");
+- } else {
+- path = fsc->mount_options->server_path + 1;
+- dout("mount opening path %s\n", path);
++ p = path_remove_extra_slash(fsc->mount_options->server_path);
++ if (IS_ERR(p)) {
++ err = PTR_ERR(p);
++ goto out;
+ }
++ /* if the server path is omitted or just consists of '/' */
++ if (!p)
++ path = "";
++ else
++ path = p;
++ dout("mount opening path '%s'\n", path);
+
+ ceph_fs_debugfs_init(fsc);
+
+ root = open_root_dentry(fsc, path, started);
++ kfree(p);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto out;
--- /dev/null
+From sultan@kerneltoast.com Fri Apr 10 11:07:34 2020
+From: Sultan Alsawaf <sultan@kerneltoast.com>
+Date: Tue, 7 Apr 2020 00:18:09 -0700
+Subject: drm/i915: Fix ref->mutex deadlock in i915_active_wait()
+To: Greg KH <greg@kroah.com>
+Cc: stable@vger.kernel.org, Jani Nikula <jani.nikula@linux.intel.com>, Joonas Lahtinen <joonas.lahtinen@linux.intel.com>, Rodrigo Vivi <rodrigo.vivi@intel.com>, David Airlie <airlied@linux.ie>, Daniel Vetter <daniel@ffwll.ch>, Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, Sultan Alsawaf <sultan@kerneltoast.com>
+Message-ID: <20200407071809.3148-1-sultan@kerneltoast.com>
+
+From: Sultan Alsawaf <sultan@kerneltoast.com>
+
+The following deadlock exists in i915_active_wait() due to a double lock
+on ref->mutex (call chain listed in order from top to bottom):
+ i915_active_wait();
+ mutex_lock_interruptible(&ref->mutex); <-- ref->mutex first acquired
+ i915_active_request_retire();
+ node_retire();
+ active_retire();
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); <-- DEADLOCK
+
+Fix the deadlock by skipping the second ref->mutex lock when
+active_retire() is called through i915_active_request_retire().
+
+Note that this bug only affects 5.4 and has since been fixed in 5.5.
+Normally, a backport of the fix from 5.5 would be in order, but the
+patch set that fixes this deadlock involves massive changes that are
+neither feasible nor desirable for backporting [1][2][3]. Therefore,
+this small patch was made to address the deadlock specifically for 5.4.
+
+[1] 274cbf20fd10 ("drm/i915: Push the i915_active.retire into a worker")
+[2] 093b92287363 ("drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree")
+[3] 750bde2fd4ff ("drm/i915: Serialise with remote retirement")
+
+Fixes: 12c255b5dad1 ("drm/i915: Provide an i915_active.acquire callback")
+Cc: <stable@vger.kernel.org> # 5.4.x
+Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_active.c | 27 +++++++++++++++++++++++----
+ drivers/gpu/drm/i915/i915_active.h | 4 ++--
+ 2 files changed, 25 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -120,13 +120,17 @@ static inline void debug_active_assert(s
+
+ #endif
+
++#define I915_ACTIVE_RETIRE_NOLOCK BIT(0)
++
+ static void
+ __active_retire(struct i915_active *ref)
+ {
+ struct active_node *it, *n;
+ struct rb_root root;
+ bool retire = false;
++ unsigned long bits;
+
++ ref = ptr_unpack_bits(ref, &bits, 2);
+ lockdep_assert_held(&ref->mutex);
+
+ /* return the unused nodes to our slabcache -- flushing the allocator */
+@@ -138,7 +142,8 @@ __active_retire(struct i915_active *ref)
+ retire = true;
+ }
+
+- mutex_unlock(&ref->mutex);
++ if (!(bits & I915_ACTIVE_RETIRE_NOLOCK))
++ mutex_unlock(&ref->mutex);
+ if (!retire)
+ return;
+
+@@ -155,13 +160,18 @@ __active_retire(struct i915_active *ref)
+ static void
+ active_retire(struct i915_active *ref)
+ {
++ struct i915_active *ref_packed = ref;
++ unsigned long bits;
++
++ ref = ptr_unpack_bits(ref, &bits, 2);
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ /* One active may be flushed from inside the acquire of another */
+- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+- __active_retire(ref);
++ if (!(bits & I915_ACTIVE_RETIRE_NOLOCK))
++ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
++ __active_retire(ref_packed);
+ }
+
+ static void
+@@ -170,6 +180,14 @@ node_retire(struct i915_active_request *
+ active_retire(node_from_active(base)->ref);
+ }
+
++static void
++node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
++{
++ struct i915_active *ref = node_from_active(base)->ref;
++
++ active_retire(ptr_pack_bits(ref, I915_ACTIVE_RETIRE_NOLOCK, 2));
++}
++
+ static struct i915_active_request *
+ active_instance(struct i915_active *ref, struct intel_timeline *tl)
+ {
+@@ -421,7 +439,8 @@ int i915_active_wait(struct i915_active
+ break;
+ }
+
+- err = i915_active_request_retire(&it->base, BKL(ref));
++ err = i915_active_request_retire(&it->base, BKL(ref),
++ node_retire_nolock);
+ if (err)
+ break;
+ }
+--- a/drivers/gpu/drm/i915/i915_active.h
++++ b/drivers/gpu/drm/i915/i915_active.h
+@@ -309,7 +309,7 @@ i915_active_request_isset(const struct i
+ */
+ static inline int __must_check
+ i915_active_request_retire(struct i915_active_request *active,
+- struct mutex *mutex)
++ struct mutex *mutex, i915_active_retire_fn retire)
+ {
+ struct i915_request *request;
+ long ret;
+@@ -327,7 +327,7 @@ i915_active_request_retire(struct i915_a
+ list_del_init(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+- active->retire(active, request);
++ retire(active, request);
+
+ return 0;
+ }
--- /dev/null
+From 47a1f8e8b3637ff5f7806587883d7d94068d9ee8 Mon Sep 17 00:00:00 2001
+From: Martin Kaiser <martin@kaiser.cx>
+Date: Thu, 5 Mar 2020 21:58:20 +0100
+Subject: hwrng: imx-rngc - fix an error path
+
+From: Martin Kaiser <martin@kaiser.cx>
+
+commit 47a1f8e8b3637ff5f7806587883d7d94068d9ee8 upstream.
+
+Make sure that the rngc interrupt is masked if the rngc self test fails.
+Self test failure means that probe fails as well. Interrupts should be
+masked in this case, regardless of the error.
+
+Cc: stable@vger.kernel.org
+Fixes: 1d5449445bd0 ("hwrng: mx-rngc - add a driver for Freescale RNGC")
+Reviewed-by: PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+Signed-off-by: Martin Kaiser <martin@kaiser.cx>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/hw_random/imx-rngc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/hw_random/imx-rngc.c
++++ b/drivers/char/hw_random/imx-rngc.c
+@@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx
+ return -ETIMEDOUT;
+ }
+
+- if (rngc->err_reg != 0)
++ if (rngc->err_reg != 0) {
++ imx_rngc_irq_mask_clear(rngc);
+ return -EIO;
++ }
+
+ return 0;
+ }
--- /dev/null
+From dfb5394f804ed4fcea1fc925be275a38d66712ab Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 26 Mar 2020 12:38:14 -0400
+Subject: IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit dfb5394f804ed4fcea1fc925be275a38d66712ab upstream.
+
+When kobject_init_and_add() returns an error in the function
+hfi1_create_port_files(), the function kobject_put() is not called for the
+corresponding kobject, which potentially leads to memory leak.
+
+This patch fixes the issue by calling kobject_put() even if
+kobject_init_and_add() fails.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200326163813.21129.44280.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/sysfs.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping sc2vl sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail;
++ /*
++ * Based on the documentation for kobject_init_and_add(), the
++ * caller should call kobject_put even if this call fails.
++ */
++ goto bail_sc2vl;
+ }
+ kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
+
+@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping sl2sc sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sc2vl;
++ goto bail_sl2sc;
+ }
+ kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
+
+@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping vl2mtu sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sl2sc;
++ goto bail_vl2mtu;
+ }
+ kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
+
+@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_vl2mtu;
++ goto bail_cc;
+ }
+
+ kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+@@ -742,7 +746,6 @@ bail_sl2sc:
+ kobject_put(&ppd->sl2sc_kobj);
+ bail_sc2vl:
+ kobject_put(&ppd->sc2vl_kobj);
+-bail:
+ return ret;
+ }
+
--- /dev/null
+From 5c15abc4328ad696fa61e2f3604918ed0c207755 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 26 Mar 2020 12:38:07 -0400
+Subject: IB/hfi1: Fix memory leaks in sysfs registration and unregistration
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit 5c15abc4328ad696fa61e2f3604918ed0c207755 upstream.
+
+When the hfi1 driver is unloaded, kmemleak will report the following
+issue:
+
+unreferenced object 0xffff8888461a4c08 (size 8):
+comm "kworker/0:0", pid 5, jiffies 4298601264 (age 2047.134s)
+hex dump (first 8 bytes):
+73 64 6d 61 30 00 ff ff sdma0...
+backtrace:
+[<00000000311a6ef5>] kvasprintf+0x62/0xd0
+[<00000000ade94d9f>] kobject_set_name_vargs+0x1c/0x90
+[<0000000060657dbb>] kobject_init_and_add+0x5d/0xb0
+[<00000000346fe72b>] 0xffffffffa0c5ecba
+[<000000006cfc5819>] 0xffffffffa0c866b9
+[<0000000031c65580>] 0xffffffffa0c38e87
+[<00000000e9739b3f>] local_pci_probe+0x41/0x80
+[<000000006c69911d>] work_for_cpu_fn+0x16/0x20
+[<00000000601267b5>] process_one_work+0x171/0x380
+[<0000000049a0eefa>] worker_thread+0x1d1/0x3f0
+[<00000000909cf2b9>] kthread+0xf8/0x130
+[<0000000058f5f874>] ret_from_fork+0x35/0x40
+
+This patch fixes the issue by:
+
+- Releasing dd->per_sdma[i].kobject in hfi1_unregister_sysfs().
+ - This will fix the memory leak.
+
+- Calling kobject_put() to unwind operations only for those entries in
+ dd->per_sdma[] whose operations have succeeded (including the current
+ one that has just failed) in hfi1_verbs_register_sysfs().
+
+Cc: <stable@vger.kernel.org>
+Fixes: 0cb2aa690c7e ("IB/hfi1: Add sysfs interface for affinity setup")
+Link: https://lore.kernel.org/r/20200326163807.21129.27371.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/sysfs.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -856,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi
+
+ return 0;
+ bail:
+- for (i = 0; i < dd->num_sdma; i++)
+- kobject_del(&dd->per_sdma[i].kobj);
++ /*
++ * The function kobject_put() will call kobject_del() if the kobject
++ * has been added successfully. The sysfs files created under the
++ * kobject directory will also be removed during the process.
++ */
++ for (; i >= 0; i--)
++ kobject_put(&dd->per_sdma[i].kobj);
+
+ return ret;
+ }
+@@ -870,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct
+ struct hfi1_pportdata *ppd;
+ int i;
+
++ /* Unwind operations in hfi1_verbs_register_sysfs() */
++ for (i = 0; i < dd->num_sdma; i++)
++ kobject_put(&dd->per_sdma[i].kobj);
++
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+
--- /dev/null
+From 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Thu, 5 Mar 2020 14:38:41 +0200
+Subject: IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 upstream.
+
+Until now the flex parser capability was used in ib_query_device() to
+indicate tunnel_offloads_caps support for mpls_over_gre/mpls_over_udp.
+
+Newer devices and firmware will have configurations with the flexparser
+but without mpls support.
+
+Testing for the flex parser capability was a mistake, the tunnel_stateless
+capability was intended for detecting mpls and was introduced at the same
+time as the flex parser capability.
+
+Otherwise userspace will be incorrectly informed that a future device
+supports MPLS when it does not.
+
+Link: https://lore.kernel.org/r/20200305123841.196086-1-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 4.17
+Fixes: e818e255a58d ("IB/mlx5: Expose MPLS related tunneling offloads")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Reviewed-by: Ariel Levkovich <lariel@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/main.c | 6 ++----
+ include/linux/mlx5/mlx5_ifc.h | 6 +++++-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1181,12 +1181,10 @@ static int mlx5_ib_query_device(struct i
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_GRE)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_UDP)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ }
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -857,7 +857,11 @@ struct mlx5_ifc_per_protocol_networking_
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 cqe_checksum_full[0x1];
+- u8 reserved_at_24[0x5];
++ u8 tunnel_stateless_geneve_tx[0x1];
++ u8 tunnel_stateless_mpls_over_udp[0x1];
++ u8 tunnel_stateless_mpls_over_gre[0x1];
++ u8 tunnel_stateless_vxlan_gpe[0x1];
++ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 reserved_at_2a[0x6];
+ u8 max_vxlan_udp_ports[0x8];
--- /dev/null
+From 767191db8220db29f78c031f4d27375173c336d5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Fri, 3 Apr 2020 17:48:34 +0200
+Subject: platform/x86: intel_int0002_vgpio: Use acpi_register_wakeup_handler()
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 767191db8220db29f78c031f4d27375173c336d5 upstream.
+
+The Power Management Events (PMEs) the INT0002 driver listens for get
+signalled by the Power Management Controller (PMC) using the same IRQ
+as used for the ACPI SCI.
+
+Since commit fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from
+waking up the system") the SCI triggering, without there being a wakeup
+cause recognized by the ACPI sleep code, will no longer wakeup the system.
+
+This breaks PMEs / wakeups signalled to the INT0002 driver, the system
+never leaves the s2idle_loop() now.
+
+Use acpi_register_wakeup_handler() to register a function which checks
+the GPE0a_STS register for a PME and trigger a wakeup when a PME has
+been signalled.
+
+Fixes: fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system")
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/intel_int0002_vgpio.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq,
+ return IRQ_HANDLED;
+ }
+
++static bool int0002_check_wake(void *data)
++{
++ u32 gpe_sts_reg;
++
++ gpe_sts_reg = inl(GPE0A_STS_PORT);
++ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
++}
++
+ static struct irq_chip int0002_byt_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+@@ -220,6 +228,7 @@ static int int0002_probe(struct platform
+
+ gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
+
++ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ device_init_wakeup(dev, true);
+ return 0;
+ }
+@@ -227,6 +236,7 @@ static int int0002_probe(struct platform
+ static int int0002_remove(struct platform_device *pdev)
+ {
+ device_init_wakeup(&pdev->dev, false);
++ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
+ return 0;
+ }
+
--- /dev/null
+From 69efea712f5b0489e67d07565aad5c94e09a3e52 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Fri, 21 Feb 2020 21:10:37 +0100
+Subject: random: always use batched entropy for get_random_u{32,64}
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 69efea712f5b0489e67d07565aad5c94e09a3e52 upstream.
+
+It turns out that RDRAND is pretty slow. Comparing these two
+constructions:
+
+ for (i = 0; i < CHACHA_BLOCK_SIZE; i += sizeof(ret))
+ arch_get_random_long(&ret);
+
+and
+
+ long buf[CHACHA_BLOCK_SIZE / sizeof(long)];
+ extract_crng((u8 *)buf);
+
+it amortizes out to 352 cycles per long for the top one and 107 cycles
+per long for the bottom one, on Coffee Lake Refresh, Intel Core i9-9880H.
+
+And importantly, the top one has the drawback of not benefiting from the
+real rng, whereas the bottom one has all the nice benefits of using our
+own chacha rng. As get_random_u{32,64} gets used in more places (perhaps
+beyond what it was originally intended for when it was introduced as
+get_random_{int,long} back in the md5 monstrosity era), it seems like it
+might be a good thing to strengthen its posture a tiny bit. Doing this
+should only be stronger and not any weaker because that pool is already
+initialized with a bunch of rdrand data (when available). This way, we
+get the benefits of the hardware rng as well as our own rng.
+
+Another benefit of this is that we no longer hit pitfalls of the recent
+stream of AMD bugs in RDRAND. One often used code pattern for various
+things is:
+
+ do {
+ val = get_random_u32();
+ } while (hash_table_contains_key(val));
+
+That recent AMD bug rendered that pattern useless, whereas we're really
+very certain that chacha20 output will give pretty distributed numbers,
+no matter what.
+
+So, this simplification seems better both from a security perspective
+and from a performance perspective.
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20200221201037.30231-1-Jason@zx2c4.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 20 ++++----------------
+ 1 file changed, 4 insertions(+), 16 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2358,11 +2358,11 @@ struct batched_entropy {
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+- * number is either as good as RDRAND or as good as /dev/urandom, with the
+- * goal of being quite fast and not depleting entropy. In order to ensure
++ * number is good as /dev/urandom, but there is no backtrack protection, with
++ * the goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+- * wait_for_random_bytes() should be called and return 0 at least once
+- * at any point prior.
++ * wait_for_random_bytes() should be called and return 0 at least once at any
++ * point prior.
+ */
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+@@ -2375,15 +2375,6 @@ u64 get_random_u64(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+-#if BITS_PER_LONG == 64
+- if (arch_get_random_long((unsigned long *)&ret))
+- return ret;
+-#else
+- if (arch_get_random_long((unsigned long *)&ret) &&
+- arch_get_random_long((unsigned long *)&ret + 1))
+- return ret;
+-#endif
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+@@ -2408,9 +2399,6 @@ u32 get_random_u32(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+- if (arch_get_random_int(&ret))
+- return ret;
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u32);
--- /dev/null
+From 0b38b5e1d0e2f361e418e05c179db05bb688bbd6 Mon Sep 17 00:00:00 2001
+From: Sven Schnelle <svens@linux.ibm.com>
+Date: Wed, 22 Jan 2020 13:38:22 +0100
+Subject: s390: prevent leaking kernel address in BEAR
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+commit 0b38b5e1d0e2f361e418e05c179db05bb688bbd6 upstream.
+
+When userspace executes a syscall or gets interrupted,
+BEAR contains a kernel address when returning to userspace.
+This make it pretty easy to figure out where the kernel is
+mapped even with KASLR enabled. To fix this, add lpswe to
+lowcore and always execute it there, so userspace sees only
+the lowcore address of lpswe. For this we have to extend
+both critical_cleanup and the SWITCH_ASYNC macro to also check
+for lpswe addresses in lowcore.
+
+Fixes: b2d24b97b2a9 ("s390/kernel: add support for kernel address space layout randomization (KASLR)")
+Cc: <stable@vger.kernel.org> # v5.2+
+Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/lowcore.h | 4 +-
+ arch/s390/include/asm/processor.h | 1
+ arch/s390/include/asm/setup.h | 7 ++++
+ arch/s390/kernel/asm-offsets.c | 2 +
+ arch/s390/kernel/entry.S | 65 ++++++++++++++++++++++----------------
+ arch/s390/kernel/process.c | 1
+ arch/s390/kernel/setup.c | 3 +
+ arch/s390/kernel/smp.c | 2 +
+ arch/s390/mm/vmem.c | 4 ++
+ 9 files changed, 62 insertions(+), 27 deletions(-)
+
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -141,7 +141,9 @@ struct lowcore {
+
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0400 */
+- __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
++ __u32 return_lpswe; /* 0x0402 */
++ __u32 return_mcck_lpswe; /* 0x0406 */
++ __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -162,6 +162,7 @@ typedef struct thread_struct thread_stru
+ #define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+ .fpu.regs = (void *) init_task.thread.fpu.fprs, \
++ .last_break = 1, \
+ }
+
+ /*
+--- a/arch/s390/include/asm/setup.h
++++ b/arch/s390/include/asm/setup.h
+@@ -8,6 +8,7 @@
+
+ #include <linux/bits.h>
+ #include <uapi/asm/setup.h>
++#include <linux/build_bug.h>
+
+ #define EP_OFFSET 0x10008
+ #define EP_STRING "S390EP"
+@@ -157,6 +158,12 @@ static inline unsigned long kaslr_offset
+ return __kaslr_offset;
+ }
+
++static inline u32 gen_lpswe(unsigned long addr)
++{
++ BUILD_BUG_ON(addr > 0xfff);
++ return 0xb2b20000 | addr;
++}
++
+ #else /* __ASSEMBLY__ */
+
+ #define IPL_DEVICE (IPL_DEVICE_OFFSET)
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -125,6 +125,8 @@ int main(void)
+ OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
+ OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
+ OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
++ OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
++ OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
+ OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
+ OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
+ OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP
+
+ .macro SWITCH_ASYNC savearea,timer
+ tmhh %r8,0x0001 # interrupting from user ?
+- jnz 1f
++ jnz 2f
+ lgr %r14,%r9
++ cghi %r14,__LC_RETURN_LPSWE
++ je 0f
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
+- jhe 0f
++ jhe 1f
++0:
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
+- jnz 1f
+-0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
++ jnz 2f
++1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,STACK_SHIFT
+- jnz 2f
++ jnz 3f
+ CHECK_STACK \savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- j 3f
+-1: UPDATE_VTIME %r14,%r15,\timer
++ j 4f
++2: UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+-2: lg %r15,__LC_ASYNC_STACK # load async stack
+-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
++3: lg %r15,__LC_ASYNC_STACK # load async stack
++4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ .endm
+
+ .macro UPDATE_VTIME w1,w2,enter_timer
+@@ -401,7 +404,7 @@ ENTRY(system_call)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lsysc_done:
+
+ #
+@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
+ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+- lg %r12,__LC_CURRENT
++ srag %r11,%r10,12
++ jnz 0f
++ /* if __LC_LAST_BREAK is < 4096, it contains one of
++ * the lpswe addresses in lowcore. Set it to 1 (initial state)
++ * to prevent leaking that address to userspace.
++ */
++ lghi %r10,1
++0: lg %r12,__LC_CURRENT
+ lghi %r11,0
+ larl %r13,cleanup_critical
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ tmhh %r8,0x0001 # test problem state bit
+- jnz 2f # -> fault in user space
++ jnz 3f # -> fault in user space
+ #if IS_ENABLED(CONFIG_KVM)
+ # cleanup critical section for program checks in sie64a
+ lgr %r14,%r9
+ slg %r14,BASED(.Lsie_critical_start)
+ clg %r14,BASED(.Lsie_critical_length)
+- jhe 0f
++ jhe 1f
+ lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ lghi %r11,_PIF_GUEST_FAULT
+ #endif
+-0: tmhh %r8,0x4000 # PER bit set in old PSW ?
+- jnz 1f # -> enabled, can't be a double fault
++1: tmhh %r8,0x4000 # PER bit set in old PSW ?
++ jnz 2f # -> enabled, can't be a double fault
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jnz .Lpgm_svcper # -> single stepped svc
+-1: CHECK_STACK __LC_SAVE_AREA_SYNC
++2: CHECK_STACK __LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- # CHECK_VMAP_STACK branches to stack_overflow or 4f
+- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
+-2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++ # CHECK_VMAP_STACK branches to stack_overflow or 5f
++ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
++3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ lg %r15,__LC_KERNEL_STACK
+ lgr %r14,%r12
+ aghi %r14,__TASK_thread # pointer to thread_struct
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+- jz 3f
++ jz 4f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
+-3: stg %r10,__THREAD_last_break(%r14)
+-4: lgr %r13,%r11
++4: stg %r10,__THREAD_last_break(%r14)
++5: lgr %r13,%r11
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
+ stg %r13,__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+- jz 5f
++ jz 6f
+ tmhh %r8,0x0001 # kernel per event ?
+ jz .Lpgm_kprobe
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
+-5: REENABLE_IRQS
++6: REENABLE_IRQS
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ .Lio_exit_kernel:
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lio_done:
+
+ #
+@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0: lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_MCCK_PSW
++ b __LC_RETURN_MCCK_LPSWE
+
+ .Lmcck_panic:
+ lg %r15,__LC_NODAT_STACK
+@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
+ #endif
+
+ ENTRY(cleanup_critical)
++ cghi %r9,__LC_RETURN_LPSWE
++ je .Lcleanup_lpswe
+ #if IS_ENABLED(CONFIG_KVM)
+ clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
+ jl 0f
+@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
++.Lcleanup_lpswe:
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+ BR_EX %r14,%r11
+ .Lcleanup_sysc_restore_insn:
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -105,6 +105,7 @@ int copy_thread_tls(unsigned long clone_
+ p->thread.system_timer = 0;
+ p->thread.hardirq_timer = 0;
+ p->thread.softirq_timer = 0;
++ p->thread.last_break = 1;
+
+ frame->sf.back_chain = 0;
+ /* new return point is ret_from_fork */
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -73,6 +73,7 @@
+ #include <asm/nospec-branch.h>
+ #include <asm/mem_detect.h>
+ #include <asm/uv.h>
++#include <asm/asm-offsets.h>
+ #include "entry.h"
+
+ /*
+@@ -457,6 +458,8 @@ static void __init setup_lowcore_dat_off
+ lc->spinlock_index = 0;
+ arch_spin_lock_setup(0);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+
+ set_prefix((u32)(unsigned long) lc);
+ lowcore_ptr[0] = lc;
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcp
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->spinlock_index = 0;
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+ if (nmi_alloc_per_cpu(lc))
+ goto out_async;
+ if (vdso_alloc_per_cpu(lc))
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
+ SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
++
++ /* we need lowcore executable for our LPSWE instructions */
++ set_memory_x(0, 1);
++
+ pr_info("Write protected kernel read-only data: %luk\n",
+ (unsigned long)(__end_rodata - _stext) >> 10);
+ }
tun-don-t-put_page-for-all-negative-return-values-from-xdp-program.patch
mlxsw-spectrum_flower-do-not-stop-at-flow_action_vlan_mangle.patch
r8169-change-back-sg-and-tso-to-be-disabled-by-default.patch
+s390-prevent-leaking-kernel-address-in-bear.patch
+random-always-use-batched-entropy-for-get_random_u-32-64.patch
+usb-dwc3-gadget-wrap-around-when-skip-trbs.patch
+uapi-rename-ext2_swab-to-swab-and-share-globally-in-swab.h.patch
+slub-improve-bit-diffusion-for-freelist-ptr-obfuscation.patch
+tools-accounting-getdelays.c-fix-netlink-attribute-length.patch
+hwrng-imx-rngc-fix-an-error-path.patch
+acpi-pm-add-acpi_register_wakeup_handler.patch
+platform-x86-intel_int0002_vgpio-use-acpi_register_wakeup_handler.patch
+asoc-jz4740-i2s-fix-divider-written-at-incorrect-offset-in-register.patch
+ib-hfi1-call-kobject_put-when-kobject_init_and_add-fails.patch
+ib-hfi1-fix-memory-leaks-in-sysfs-registration-and-unregistration.patch
+ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunnel_offloads.patch
+arm-imx-enable-arm_errata_814220-for-i.mx6ul-and-i.mx7d.patch
+arm-imx-only-select-arm_errata_814220-for-armv7-a.patch
+drm-i915-fix-ref-mutex-deadlock-in-i915_active_wait.patch
+ceph-remove-the-extra-slashes-in-the-server-path.patch
+ceph-canonicalize-server-path-in-place.patch
--- /dev/null
+From 1ad53d9fa3f6168ebcf48a50e08b170432da2257 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 1 Apr 2020 21:04:23 -0700
+Subject: slub: improve bit diffusion for freelist ptr obfuscation
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 1ad53d9fa3f6168ebcf48a50e08b170432da2257 upstream.
+
+Under CONFIG_SLAB_FREELIST_HARDENED=y, the obfuscation was relatively weak
+in that the ptr and ptr address were usually so close that the first XOR
+would result in an almost entirely 0-byte value[1], leaving most of the
+"secret" number ultimately being stored after the third XOR. A single
+blind memory content exposure of the freelist was generally sufficient to
+learn the secret.
+
+Add a swab() call to mix bits a little more. This is a cheap way (1
+cycle) to make attacks need more than a single exposure to learn the
+secret (or to know _where_ the exposure is in memory).
+
+kmalloc-32 freelist walk, before:
+
+ptr ptr_addr stored value secret
+ffff90c22e019020@ffff90c22e019000 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019040@ffff90c22e019020 is 86528eb656b3b5fd (86528eb656b3b59d)
+ffff90c22e019060@ffff90c22e019040 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019080@ffff90c22e019060 is 86528eb656b3b57d (86528eb656b3b59d)
+ffff90c22e0190a0@ffff90c22e019080 is 86528eb656b3b5bd (86528eb656b3b59d)
+...
+
+after:
+
+ptr ptr_addr stored value secret
+ffff9eed6e019020@ffff9eed6e019000 is 793d1135d52cda42 (86528eb656b3b59d)
+ffff9eed6e019040@ffff9eed6e019020 is 593d1135d52cda22 (86528eb656b3b59d)
+ffff9eed6e019060@ffff9eed6e019040 is 393d1135d52cda02 (86528eb656b3b59d)
+ffff9eed6e019080@ffff9eed6e019060 is 193d1135d52cdae2 (86528eb656b3b59d)
+ffff9eed6e0190a0@ffff9eed6e019080 is f93d1135d52cdac2 (86528eb656b3b59d)
+
+[1] https://blog.infosectcbr.com.au/2020/03/weaknesses-in-linux-kernel-heap.html
+
+Fixes: 2482ddec670f ("mm: add SLUB free list pointer obfuscation")
+Reported-by: Silvio Cesare <silvio.cesare@gmail.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/202003051623.AF4F8CB@keescook
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -261,7 +261,7 @@ static inline void *freelist_ptr(const s
+ * freepointer to be restored incorrectly.
+ */
+ return (void *)((unsigned long)ptr ^ s->random ^
+- (unsigned long)kasan_reset_tag((void *)ptr_addr));
++ swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
+ #else
+ return ptr;
+ #endif
--- /dev/null
+From 4054ab64e29bb05b3dfe758fff3c38a74ba753bb Mon Sep 17 00:00:00 2001
+From: David Ahern <dsahern@kernel.org>
+Date: Wed, 1 Apr 2020 21:02:25 -0700
+Subject: tools/accounting/getdelays.c: fix netlink attribute length
+
+From: David Ahern <dsahern@kernel.org>
+
+commit 4054ab64e29bb05b3dfe758fff3c38a74ba753bb upstream.
+
+A recent change to the netlink code: 6e237d099fac ("netlink: Relax attr
+validation for fixed length types") logs a warning when programs send
+messages with invalid attributes (e.g., wrong length for a u32). Yafang
+reported this error message for tools/accounting/getdelays.c.
+
+send_cmd() is wrongly adding 1 to the attribute length. As noted in
+include/uapi/linux/netlink.h nla_len should be NLA_HDRLEN + payload
+length, so drop the +1.
+
+Fixes: 9e06d3f9f6b1 ("per task delay accounting taskstats interface: documentation fix")
+Reported-by: Yafang Shao <laoar.shao@gmail.com>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Tested-by: Yafang Shao <laoar.shao@gmail.com>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Shailabh Nagar <nagar@watson.ibm.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200327173111.63922-1-dsahern@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/accounting/getdelays.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/accounting/getdelays.c
++++ b/tools/accounting/getdelays.c
+@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+- na->nla_len = nla_len + 1 + NLA_HDRLEN;
++ na->nla_len = nla_len + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
--- /dev/null
+From d5767057c9a76a29f073dad66b7fa12a90e8c748 Mon Sep 17 00:00:00 2001
+From: Yury Norov <yury.norov@gmail.com>
+Date: Thu, 30 Jan 2020 22:16:40 -0800
+Subject: uapi: rename ext2_swab() to swab() and share globally in swab.h
+
+From: Yury Norov <yury.norov@gmail.com>
+
+commit d5767057c9a76a29f073dad66b7fa12a90e8c748 upstream.
+
+ext2_swab() is defined locally in lib/find_bit.c However it is not
+specific to ext2, neither to bitmaps.
+
+There are many potential users of it, so rename it to just swab() and
+move to include/uapi/linux/swab.h
+
+ABI guarantees that size of unsigned long corresponds to BITS_PER_LONG,
+therefore drop unneeded cast.
+
+Link: http://lkml.kernel.org/r/20200103202846.21616-1-yury.norov@gmail.com
+Signed-off-by: Yury Norov <yury.norov@gmail.com>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Joe Perches <joe@perches.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: William Breathitt Gray <vilhelm.gray@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/swab.h | 1 +
+ include/uapi/linux/swab.h | 10 ++++++++++
+ lib/find_bit.c | 16 ++--------------
+ 3 files changed, 13 insertions(+), 14 deletions(-)
+
+--- a/include/linux/swab.h
++++ b/include/linux/swab.h
+@@ -7,6 +7,7 @@
+ # define swab16 __swab16
+ # define swab32 __swab32
+ # define swab64 __swab64
++# define swab __swab
+ # define swahw32 __swahw32
+ # define swahb32 __swahb32
+ # define swab16p __swab16p
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/types.h>
+ #include <linux/compiler.h>
++#include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+
+ /*
+@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32
+ __fswab64(x))
+ #endif
+
++static __always_inline unsigned long __swab(const unsigned long y)
++{
++#if BITS_PER_LONG == 64
++ return __swab64(y);
++#else /* BITS_PER_LONG == 32 */
++ return __swab32(y);
++#endif
++}
++
+ /**
+ * __swahw32 - return a word-swapped 32-bit value
+ * @x: value to wordswap
+--- a/lib/find_bit.c
++++ b/lib/find_bit.c
+@@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit);
+
+ #ifdef __BIG_ENDIAN
+
+-/* include/linux/byteorder does not support "unsigned long" type */
+-static inline unsigned long ext2_swab(const unsigned long y)
+-{
+-#if BITS_PER_LONG == 64
+- return (unsigned long) __swab64((u64) y);
+-#elif BITS_PER_LONG == 32
+- return (unsigned long) __swab32((u32) y);
+-#else
+-#error BITS_PER_LONG not defined
+-#endif
+-}
+-
+ #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
+ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+@@ -177,7 +165,7 @@ static inline unsigned long _find_next_b
+ tmp ^= invert;
+
+ /* Handle 1st word. */
+- tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
++ tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
+ start = round_down(start, BITS_PER_LONG);
+
+ while (!tmp) {
+@@ -191,7 +179,7 @@ static inline unsigned long _find_next_b
+ tmp ^= invert;
+ }
+
+- return min(start + __ffs(ext2_swab(tmp)), nbits);
++ return min(start + __ffs(swab(tmp)), nbits);
+ }
+ #endif
+
--- /dev/null
+From 2dedea035ae82c5af0595637a6eda4655532b21e Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 5 Mar 2020 13:24:01 -0800
+Subject: usb: dwc3: gadget: Wrap around when skip TRBs
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 2dedea035ae82c5af0595637a6eda4655532b21e upstream.
+
+When skipping TRBs, we need to account for wrapping around the ring
+buffer and not modifying some invalid TRBs. Without this fix, dwc3 won't
+be able to check for available TRBs.
+
+Cc: stable <stable@vger.kernel.org>
+Fixes: 7746a8dfb3f9 ("usb: dwc3: gadget: extract dwc3_gadget_ep_skip_trbs()")
+Signed-off-by: Thinh Nguyen <thinhn@synopsys.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/dwc3/gadget.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1518,7 +1518,7 @@ static void dwc3_gadget_ep_skip_trbs(str
+ for (i = 0; i < req->num_trbs; i++) {
+ struct dwc3_trb *trb;
+
+- trb = req->trb + i;
++ trb = &dep->trb_pool[dep->trb_dequeue];
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }