--- /dev/null
+From 7c420636860a719049fae9403e2c87804f53bdde Mon Sep 17 00:00:00 2001
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Date: Tue, 16 Apr 2019 13:46:07 +0200
+Subject: drm/gma500/cdv: Check vbt config bits when detecting lvds panels
+
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+
+commit 7c420636860a719049fae9403e2c87804f53bdde upstream.
+
+Some machines have an lvds child device in vbt even though a panel is
+not attached. To make detection more reliable we now also check the lvds
+config bits available in the vbt.
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1665766
+Cc: stable@vger.kernel.org
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190416114607.1072-1-patrik.r.jakobsson@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/gma500/cdv_intel_lvds.c | 3 +++
+ drivers/gpu/drm/gma500/intel_bios.c | 3 +++
+ drivers/gpu/drm/gma500/psb_drv.h | 1 +
+ 3 files changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_devi
+ int pipe;
+ u8 pin;
+
++ if (!dev_priv->lvds_enabled_in_vbt)
++ return;
++
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+--- a/drivers/gpu/drm/gma500/intel_bios.c
++++ b/drivers/gpu/drm/gma500/intel_bios.c
+@@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_pri
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
++ dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
++ DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
++
+ /* This bit means to use 96Mhz for DPLL_A or not */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -538,6 +538,7 @@ struct drm_psb_private {
+ int lvds_ssc_freq;
+ bool is_lvds_on;
+ bool is_mipi_on;
++ bool lvds_enabled_in_vbt;
+ u32 mipi_ctrl_display;
+
+ unsigned int core_freq;
--- /dev/null
+From 474d952b4870cfbdc55d3498f4d498775fe77e81 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 3 Jun 2019 13:56:08 -0300
+Subject: drm/msm: fix fb references in async update
+
+From: Helen Koike <helen.koike@collabora.com>
+
+commit 474d952b4870cfbdc55d3498f4d498775fe77e81 upstream.
+
+Async update callbacks are expected to set the old_fb in the new_state
+so prepare/cleanup framebuffers are balanced.
+
+Cc: <stable@vger.kernel.org> # v4.14+
+Fixes: 224a4c970987 ("drm/msm: update cursors asynchronously through atomic")
+Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Acked-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190603165610.24614-4-helen.koike@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -503,6 +503,8 @@ static int mdp5_plane_atomic_async_check
+ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+ {
++ struct drm_framebuffer *old_fb = plane->state->fb;
++
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+@@ -525,6 +527,8 @@ static void mdp5_plane_atomic_async_upda
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
++
++ new_state->fb = old_fb;
+ }
+
+ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
--- /dev/null
+From 35d6fcbb7c3e296a52136347346a698a35af3fda Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Mon, 27 May 2019 11:42:07 +0200
+Subject: fuse: fallocate: fix return with locked inode
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 35d6fcbb7c3e296a52136347346a698a35af3fda upstream.
+
+Do the proper cleanup in case the size check fails.
+
+Tested with xfstests:generic/228
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 0cbade024ba5 ("fuse: honor RLIMIT_FSIZE in fuse_file_fallocate")
+Cc: Liu Bo <bo.liu@linux.alibaba.com>
+Cc: <stable@vger.kernel.org> # v3.5
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2981,7 +2981,7 @@ static long fuse_file_fallocate(struct f
+ offset + length > i_size_read(inode)) {
+ err = inode_newsize_ok(inode, offset + length);
+ if (err)
+- return err;
++ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
--- /dev/null
+From 110080cea0d0e4dfdb0b536e7f8a5633ead6a781 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 7 May 2019 11:36:34 +0300
+Subject: genwqe: Prevent an integer overflow in the ioctl
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 110080cea0d0e4dfdb0b536e7f8a5633ead6a781 upstream.
+
+There are a couple potential integer overflows here.
+
+ round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+The first thing is that the "m->size + (...)" addition could overflow,
+and the second is that round_up() overflows to zero if the result is
+within PAGE_SIZE of the type max.
+
+In this code, the "m->size" variable is an u64 but we're saving the
+result in "map_size" which is an unsigned long and genwqe_user_vmap()
+takes an unsigned long as well. So I have used ULONG_MAX as the upper
+bound. From a practical perspective unsigned long is fine/better than
+trying to change all the types to u64.
+
+Fixes: eaf4722d4645 ("GenWQE Character device and DDCB queue")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/genwqe/card_dev.c | 2 ++
+ drivers/misc/genwqe/card_utils.c | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/misc/genwqe/card_dev.c
++++ b/drivers/misc/genwqe/card_dev.c
+@@ -780,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_
+
+ if ((m->addr == 0x0) || (m->size == 0))
+ return -EINVAL;
++ if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
++ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -587,6 +587,10 @@ int genwqe_user_vmap(struct genwqe_dev *
+ /* determine space needed for page_list. */
+ data = (unsigned long)uaddr;
+ offs = offset_in_page(data);
++ if (size > ULONG_MAX - PAGE_SIZE - offs) {
++ m->size = 0; /* mark unused and not added */
++ return -EINVAL;
++ }
+ m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
+
+ m->page_list = kcalloc(m->nr_pages,
--- /dev/null
+From 49b809586730a77b57ce620b2f9689de765d790b Mon Sep 17 00:00:00 2001
+From: Robert Hancock <hancock@sedsystems.ca>
+Date: Tue, 4 Jun 2019 15:55:51 -0600
+Subject: i2c: xiic: Add max_read_len quirk
+
+From: Robert Hancock <hancock@sedsystems.ca>
+
+commit 49b809586730a77b57ce620b2f9689de765d790b upstream.
+
+This driver does not support reading more than 255 bytes at once because
+the register for storing the number of bytes to read is only 8 bits. Add
+a max_read_len quirk to enforce this.
+
+This was found when using this driver with the SFP driver, which was
+previously reading all 256 bytes in the SFP EEPROM in one transaction.
+This caused a bunch of hard-to-debug errors in the xiic driver since the
+driver/logic was treating the number of bytes to read as zero.
+Rejecting transactions that aren't supported at least allows the problem
+to be diagnosed more easily.
+
+Signed-off-by: Robert Hancock <hancock@sedsystems.ca>
+Reviewed-by: Michal Simek <michal.simek@xilinx.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-xiic.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_a
+ .functionality = xiic_func,
+ };
+
++static const struct i2c_adapter_quirks xiic_quirks = {
++ .max_read_len = 255,
++};
++
+ static const struct i2c_adapter xiic_adapter = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &xiic_algorithm,
++ .quirks = &xiic_quirks,
+ };
+
+
--- /dev/null
+From 074a1e1167afd82c26f6d03a9a8b997d564bb241 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 28 May 2019 17:05:03 +0000
+Subject: MIPS: Bounds check virt_addr_valid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit 074a1e1167afd82c26f6d03a9a8b997d564bb241 upstream.
+
+The virt_addr_valid() function is meant to return true iff
+virt_to_page() will return a valid struct page reference. This is true
+iff the address provided is found within the unmapped address range
+between PAGE_OFFSET & MAP_BASE, but we don't currently check for that
+condition. Instead we simply mask the address to obtain what will be a
+physical address if the virtual address is indeed in the desired range,
+shift it to form a PFN & then call pfn_valid(). This can incorrectly
+return true if called with a virtual address which, after masking,
+happens to form a physical address corresponding to a valid PFN.
+
+For example we may vmalloc an address in the kernel mapped region
+starting at MAP_BASE & obtain the virtual address:
+
+ addr = 0xc000000000002000
+
+When masked by virt_to_phys(), which uses __pa() & in turn CPHYSADDR(),
+we obtain the following (bogus) physical address:
+
+ addr = 0x2000
+
+In a common system with PHYS_OFFSET=0 this will correspond to a valid
+struct page which should really be accessed by virtual address
+PAGE_OFFSET+0x2000, causing virt_addr_valid() to incorrectly return 1
+indicating that the original address corresponds to a struct page.
+
+This is equivalent to the ARM64 change made in commit ca219452c6b8
+("arm64: Correctly bounds check virt_addr_valid").
+
+This fixes fallout when hardened usercopy is enabled caused by the
+related commit 517e1fbeb65f ("mm/usercopy: Drop extra
+is_vmalloc_or_module() check") which removed a check for the vmalloc
+range that was present from the introduction of the hardened usercopy
+feature.
+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+References: ca219452c6b8 ("arm64: Correctly bounds check virt_addr_valid")
+References: 517e1fbeb65f ("mm/usercopy: Drop extra is_vmalloc_or_module() check")
+Reported-by: Julien Cristau <jcristau@debian.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Tested-by: YunQiang Su <ysu@wavecomp.com>
+URL: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=929366
+Cc: stable@vger.kernel.org # v4.12+
+Cc: linux-mips@vger.kernel.org
+Cc: Yunqiang Su <ysu@wavecomp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/mmap.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct
+
+ int __virt_addr_valid(const volatile void *kaddr)
+ {
++ unsigned long vaddr = (unsigned long)kaddr;
++
++ if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
++ return 0;
++
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+ }
+ EXPORT_SYMBOL_GPL(__virt_addr_valid);
--- /dev/null
+From e4f2d1af7163becb181419af9dece9206001e0a6 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 28 May 2019 17:21:26 +0000
+Subject: MIPS: pistachio: Build uImage.gz by default
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit e4f2d1af7163becb181419af9dece9206001e0a6 upstream.
+
+The pistachio platform uses the U-Boot bootloader & generally boots a
+kernel in the uImage format. As such it's useful to build one when
+building the kernel, but to do so currently requires the user to
+manually specify a uImage target on the make command line.
+
+Make uImage.gz the pistachio platform's default build target, so that
+the default is to build a kernel image that we can actually boot on a
+board such as the MIPS Creator Ci40.
+
+Marked for stable backport as far as v4.1 where pistachio support was
+introduced. This is primarily useful for CI systems such as kernelci.org
+which will benefit from us building a suitable image which can then be
+booted as part of automated testing, extending our test coverage to the
+affected stable branches.
+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Kevin Hilman <khilman@baylibre.com>
+Tested-by: Kevin Hilman <khilman@baylibre.com>
+URL: https://groups.io/g/kernelci/message/388
+Cc: stable@vger.kernel.org # v4.1+
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/pistachio/Platform | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/pistachio/Platform
++++ b/arch/mips/pistachio/Platform
+@@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
+ -I$(srctree)/arch/mips/include/asm/mach-pistachio
+ load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
+ zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
++all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
--- /dev/null
+From 52b042ab9948cc367b61f9ca9c18603aa7813c3a Mon Sep 17 00:00:00 2001
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+Date: Wed, 22 May 2019 01:57:10 +0800
+Subject: NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter
+
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+
+commit 52b042ab9948cc367b61f9ca9c18603aa7813c3a upstream.
+
+Commit b7dbcc0e433f "NFSv4.1: Fix a race where CB_NOTIFY_LOCK fails to wake a waiter"
+found this bug. However it didn't fix it.
+
+This commit replaces schedule_timeout() with wait_woken() and
+default_wake_function() with woken_wake_function() in function
+nfs4_retry_setlk() and nfs4_wake_lock_waiter(). wait_woken() uses
+memory barriers in its implementation to avoid potential race condition
+when putting a process into sleeping state and then waking it up.
+
+Fixes: a1d617d8f134 ("nfs: allow blocking locks to be awoken by lock callbacks")
+Cc: stable@vger.kernel.org #4.9+
+Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 24 +++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6850,7 +6850,6 @@ struct nfs4_lock_waiter {
+ struct task_struct *task;
+ struct inode *inode;
+ struct nfs_lowner *owner;
+- bool notified;
+ };
+
+ static int
+@@ -6872,13 +6871,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t
+ /* Make sure it's for the right inode */
+ if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
+ return 0;
+-
+- waiter->notified = true;
+ }
+
+ /* override "private" so we can use default_wake_function */
+ wait->private = waiter->task;
+- ret = autoremove_wake_function(wait, mode, flags, key);
++ ret = woken_wake_function(wait, mode, flags, key);
++ if (ret)
++ list_del_init(&wait->entry);
+ wait->private = waiter;
+ return ret;
+ }
+@@ -6887,7 +6886,6 @@ static int
+ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+ int status = -ERESTARTSYS;
+- unsigned long flags;
+ struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs_client *clp = server->nfs_client;
+@@ -6897,8 +6895,7 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ .s_dev = server->s_dev };
+ struct nfs4_lock_waiter waiter = { .task = current,
+ .inode = state->inode,
+- .owner = &owner,
+- .notified = false };
++ .owner = &owner};
+ wait_queue_entry_t wait;
+
+ /* Don't bother with waitqueue if we don't expect a callback */
+@@ -6911,21 +6908,14 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ add_wait_queue(q, &wait);
+
+ while(!signalled()) {
+- waiter.notified = false;
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+ break;
+
+ status = -ERESTARTSYS;
+- spin_lock_irqsave(&q->lock, flags);
+- if (waiter.notified) {
+- spin_unlock_irqrestore(&q->lock, flags);
+- continue;
+- }
+- set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irqrestore(&q->lock, flags);
+-
+- freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
++ freezer_do_not_count();
++ wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
++ freezer_count();
+ }
+
+ finish_wait(q, &wait);
--- /dev/null
+From ba851a39c9703f09684a541885ed176f8fb7c868 Mon Sep 17 00:00:00 2001
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+Date: Mon, 13 May 2019 14:58:22 +0800
+Subject: NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled
+
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+
+commit ba851a39c9703f09684a541885ed176f8fb7c868 upstream.
+
+When a waiter is waked by CB_NOTIFY_LOCK, it will retry
+nfs4_proc_setlk(). The waiter may fail to nfs4_proc_setlk() and sleep
+again. However, the waiter is already removed from clp->cl_lock_waitq
+when handling CB_NOTIFY_LOCK in nfs4_wake_lock_waiter(). So any
+subsequent CB_NOTIFY_LOCK won't wake this waiter anymore. We should
+put the waiter back to clp->cl_lock_waitq before retrying.
+
+Cc: stable@vger.kernel.org #4.9+
+Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6905,20 +6905,22 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ init_wait(&wait);
+ wait.private = &waiter;
+ wait.func = nfs4_wake_lock_waiter;
+- add_wait_queue(q, &wait);
+
+ while(!signalled()) {
++ add_wait_queue(q, &wait);
+ status = nfs4_proc_setlk(state, cmd, request);
+- if ((status != -EAGAIN) || IS_SETLK(cmd))
++ if ((status != -EAGAIN) || IS_SETLK(cmd)) {
++ finish_wait(q, &wait);
+ break;
++ }
+
+ status = -ERESTARTSYS;
+ freezer_do_not_count();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
+ freezer_count();
++ finish_wait(q, &wait);
+ }
+
+- finish_wait(q, &wait);
+ return status;
+ }
+ #else /* !CONFIG_NFS_V4_1 */
--- /dev/null
+From 63923d2c3800919774f5c651d503d1dd2adaddd5 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Mon, 27 May 2019 20:15:14 -0400
+Subject: parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 63923d2c3800919774f5c651d503d1dd2adaddd5 upstream.
+
+We only support I/O to kernel space. Using %sr1 to load the coherence
+index may be racy unless interrupts are disabled. This patch changes the
+code used to load the coherence index to use implicit space register
+selection. This saves one instruction and eliminates the race.
+
+Tested on rp3440, c8000 and c3750.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/parisc/ccio-dma.c | 4 +---
+ drivers/parisc/sba_iommu.c | 3 +--
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_
+ /* We currently only support kernel addresses */
+ BUG_ON(sid != KERNEL_SPACE);
+
+- mtsp(sid,1);
+-
+ /*
+ ** WORD 1 - low order word
+ ** "hints" parm includes the VALID bit!
+@@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_
+ ** Grab virtual index [0:11]
+ ** Deposit virt_idx bits into I/O PDIR word
+ */
+- asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
++ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
+ asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
+
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t
+ pa = virt_to_phys(vba);
+ pa &= IOVP_MASK;
+
+- mtsp(sid,1);
+- asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
++ asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
+ pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
+
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
--- /dev/null
+From ea84b580b95521644429cc6748b6c2bf27c8b0f3 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Fri, 30 Nov 2018 14:36:58 -0800
+Subject: pstore: Convert buf_lock to semaphore
+
+From: Kees Cook <keescook@chromium.org>
+
+commit ea84b580b95521644429cc6748b6c2bf27c8b0f3 upstream.
+
+Instead of running with interrupts disabled, use a semaphore. This should
+make it easier for backends that may need to sleep (e.g. EFI) when
+performing a write:
+
+|BUG: sleeping function called from invalid context at kernel/sched/completion.c:99
+|in_atomic(): 1, irqs_disabled(): 1, pid: 2236, name: sig-xstate-bum
+|Preemption disabled at:
+|[<ffffffff99d60512>] pstore_dump+0x72/0x330
+|CPU: 26 PID: 2236 Comm: sig-xstate-bum Tainted: G D 4.20.0-rc3 #45
+|Call Trace:
+| dump_stack+0x4f/0x6a
+| ___might_sleep.cold.91+0xd3/0xe4
+| __might_sleep+0x50/0x90
+| wait_for_completion+0x32/0x130
+| virt_efi_query_variable_info+0x14e/0x160
+| efi_query_variable_store+0x51/0x1a0
+| efivar_entry_set_safe+0xa3/0x1b0
+| efi_pstore_write+0x109/0x140
+| pstore_dump+0x11c/0x330
+| kmsg_dump+0xa4/0xd0
+| oops_exit+0x22/0x30
+...
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Fixes: 21b3ddd39fee ("efi: Don't use spinlocks for efi vars")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/nvram_64.c | 2 -
+ drivers/acpi/apei/erst.c | 1
+ drivers/firmware/efi/efi-pstore.c | 4 ---
+ fs/pstore/platform.c | 44 +++++++++++++++++++-------------------
+ fs/pstore/ram.c | 1
+ include/linux/pstore.h | 7 ++----
+ 6 files changed, 27 insertions(+), 32 deletions(-)
+
+--- a/arch/powerpc/kernel/nvram_64.c
++++ b/arch/powerpc/kernel/nvram_64.c
+@@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
+
+- spin_lock_init(&nvram_pstore_info.buf_lock);
+-
+ rc = pstore_register(&nvram_pstore_info);
+ if (rc && (rc != -EPERM))
+ /* Print error only when pstore.backend == nvram */
+--- a/drivers/acpi/apei/erst.c
++++ b/drivers/acpi/apei/erst.c
+@@ -1176,7 +1176,6 @@ static int __init erst_init(void)
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ buf = kmalloc(erst_erange.size, GFP_KERNEL);
+- spin_lock_init(&erst_info.buf_lock);
+ if (buf) {
+ erst_info.buf = buf + sizeof(struct cper_pstore_record);
+ erst_info.bufsize = erst_erange.size -
+--- a/drivers/firmware/efi/efi-pstore.c
++++ b/drivers/firmware/efi/efi-pstore.c
+@@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstor
+ efi_name[i] = name[i];
+
+ ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
+- !pstore_cannot_block_path(record->reason),
+- record->size, record->psi->buf);
++ preemptible(), record->size, record->psi->buf);
+
+ if (record->reason == KMSG_DUMP_OOPS)
+ efivar_run_worker();
+@@ -369,7 +368,6 @@ static __init int efivars_pstore_init(vo
+ return -ENOMEM;
+
+ efi_pstore_info.bufsize = 1024;
+- spin_lock_init(&efi_pstore_info.buf_lock);
+
+ if (pstore_register(&efi_pstore_info)) {
+ kfree(efi_pstore_info.buf);
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -124,26 +124,27 @@ static const char *get_reason_str(enum k
+ }
+ }
+
+-bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
++/*
++ * Should pstore_dump() wait for a concurrent pstore_dump()? If
++ * not, the current pstore_dump() will report a failure to dump
++ * and return.
++ */
++static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
+ {
+- /*
+- * In case of NMI path, pstore shouldn't be blocked
+- * regardless of reason.
+- */
++ /* In NMI path, pstore shouldn't block regardless of reason. */
+ if (in_nmi())
+ return true;
+
+ switch (reason) {
+ /* In panic case, other cpus are stopped by smp_send_stop(). */
+ case KMSG_DUMP_PANIC:
+- /* Emergency restart shouldn't be blocked by spin lock. */
++ /* Emergency restart shouldn't be blocked. */
+ case KMSG_DUMP_EMERG:
+ return true;
+ default:
+ return false;
+ }
+ }
+-EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
+
+ #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
+ static int zbufsize_deflate(size_t size)
+@@ -378,23 +379,23 @@ static void pstore_dump(struct kmsg_dump
+ unsigned long total = 0;
+ const char *why;
+ unsigned int part = 1;
+- unsigned long flags = 0;
+- int is_locked;
+ int ret;
+
+ why = get_reason_str(reason);
+
+- if (pstore_cannot_block_path(reason)) {
+- is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
+- if (!is_locked) {
+- pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
+- , in_nmi() ? "NMI" : why);
++ if (down_trylock(&psinfo->buf_lock)) {
++ /* Failed to acquire lock: give up if we cannot wait. */
++ if (pstore_cannot_wait(reason)) {
++ pr_err("dump skipped in %s path: may corrupt error record\n",
++ in_nmi() ? "NMI" : why);
++ return;
++ }
++ if (down_interruptible(&psinfo->buf_lock)) {
++ pr_err("could not grab semaphore?!\n");
+ return;
+ }
+- } else {
+- spin_lock_irqsave(&psinfo->buf_lock, flags);
+- is_locked = 1;
+ }
++
+ oopscount++;
+ while (total < kmsg_bytes) {
+ char *dst;
+@@ -411,7 +412,7 @@ static void pstore_dump(struct kmsg_dump
+ record.part = part;
+ record.buf = psinfo->buf;
+
+- if (big_oops_buf && is_locked) {
++ if (big_oops_buf) {
+ dst = big_oops_buf;
+ dst_size = big_oops_buf_sz;
+ } else {
+@@ -429,7 +430,7 @@ static void pstore_dump(struct kmsg_dump
+ dst_size, &dump_size))
+ break;
+
+- if (big_oops_buf && is_locked) {
++ if (big_oops_buf) {
+ zipped_len = pstore_compress(dst, psinfo->buf,
+ header_size + dump_size,
+ psinfo->bufsize);
+@@ -452,8 +453,8 @@ static void pstore_dump(struct kmsg_dump
+ total += record.size;
+ part++;
+ }
+- if (is_locked)
+- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
++
++ up(&psinfo->buf_lock);
+ }
+
+ static struct kmsg_dumper pstore_dumper = {
+@@ -572,6 +573,7 @@ int pstore_register(struct pstore_info *
+ psi->write_user = pstore_write_user_compat;
+ psinfo = psi;
+ mutex_init(&psinfo->read_mutex);
++ sema_init(&psinfo->buf_lock, 1);
+ spin_unlock(&pstore_lock);
+
+ if (owner && !try_module_get(owner)) {
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -814,7 +814,6 @@ static int ramoops_probe(struct platform
+ err = -ENOMEM;
+ goto fail_clear;
+ }
+- spin_lock_init(&cxt->pstore.buf_lock);
+
+ cxt->pstore.flags = PSTORE_FLAGS_DMESG;
+ if (cxt->console_size)
+--- a/include/linux/pstore.h
++++ b/include/linux/pstore.h
+@@ -26,7 +26,7 @@
+ #include <linux/errno.h>
+ #include <linux/kmsg_dump.h>
+ #include <linux/mutex.h>
+-#include <linux/spinlock.h>
++#include <linux/semaphore.h>
+ #include <linux/time.h>
+ #include <linux/types.h>
+
+@@ -88,7 +88,7 @@ struct pstore_record {
+ * @owner: module which is repsonsible for this backend driver
+ * @name: name of the backend driver
+ *
+- * @buf_lock: spinlock to serialize access to @buf
++ * @buf_lock: semaphore to serialize access to @buf
+ * @buf: preallocated crash dump buffer
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+@@ -173,7 +173,7 @@ struct pstore_info {
+ struct module *owner;
+ char *name;
+
+- spinlock_t buf_lock;
++ struct semaphore buf_lock;
+ char *buf;
+ size_t bufsize;
+
+@@ -199,7 +199,6 @@ struct pstore_info {
+
+ extern int pstore_register(struct pstore_info *);
+ extern void pstore_unregister(struct pstore_info *);
+-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+
+ struct pstore_ftrace_record {
+ unsigned long ip;
--- /dev/null
+From 8880fa32c557600f5f624084152668ed3c2ea51e Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 30 May 2019 23:37:29 -0700
+Subject: pstore/ram: Run without kernel crash dump region
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 8880fa32c557600f5f624084152668ed3c2ea51e upstream.
+
+The ram pstore backend has always had the crash dumper frontend enabled
+unconditionally. However, it was possible to effectively disable it
+by setting a record_size=0. All the machinery would run (storing dumps
+to the temporary crash buffer), but 0 bytes would ultimately get stored
+due to there being no przs allocated for dumps. Commit 89d328f637b9
+("pstore/ram: Correctly calculate usable PRZ bytes"), however, assumed
+that there would always be at least one allocated dprz for calculating
+the size of the temporary crash buffer. This was, of course, not the
+case when record_size=0, and would lead to a NULL deref trying to find
+the dprz buffer size:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+...
+IP: ramoops_probe+0x285/0x37e (fs/pstore/ram.c:808)
+
+ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+
+Instead, we need to only enable the frontends based on the success of the
+prz initialization and only take the needed actions when those zones are
+available. (This also fixes a possible error in detecting if the ftrace
+frontend should be enabled.)
+
+Reported-and-tested-by: Yaro Slav <yaro330@gmail.com>
+Fixes: 89d328f637b9 ("pstore/ram: Correctly calculate usable PRZ bytes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c | 3 ++-
+ fs/pstore/ram.c | 36 +++++++++++++++++++++++-------------
+ 2 files changed, 25 insertions(+), 14 deletions(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -583,7 +583,8 @@ int pstore_register(struct pstore_info *
+ return -EINVAL;
+ }
+
+- allocate_buf_for_compression();
++ if (psi->flags & PSTORE_FLAGS_DMESG)
++ allocate_buf_for_compression();
+
+ if (pstore_is_mounted())
+ pstore_get_records(0);
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -803,26 +803,36 @@ static int ramoops_probe(struct platform
+
+ cxt->pstore.data = cxt;
+ /*
+- * Since bufsize is only used for dmesg crash dumps, it
+- * must match the size of the dprz record (after PRZ header
+- * and ECC bytes have been accounted for).
++ * Prepare frontend flags based on which areas are initialized.
++ * For ramoops_init_przs() cases, the "max count" variable tells
++ * if there are regions present. For ramoops_init_prz() cases,
++ * the single region size is how to check.
+ */
+- cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+- cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+- if (!cxt->pstore.buf) {
+- pr_err("cannot allocate pstore crash dump buffer\n");
+- err = -ENOMEM;
+- goto fail_clear;
+- }
+-
+- cxt->pstore.flags = PSTORE_FLAGS_DMESG;
++ cxt->pstore.flags = 0;
++ if (cxt->max_dump_cnt)
++ cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
+ if (cxt->console_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
+- if (cxt->ftrace_size)
++ if (cxt->max_ftrace_cnt)
+ cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
+ if (cxt->pmsg_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
+
++ /*
++ * Since bufsize is only used for dmesg crash dumps, it
++ * must match the size of the dprz record (after PRZ header
++ * and ECC bytes have been accounted for).
++ */
++ if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
++ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
++ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
++ if (!cxt->pstore.buf) {
++ pr_err("cannot allocate pstore crash dump buffer\n");
++ err = -ENOMEM;
++ goto fail_clear;
++ }
++ }
++
+ err = pstore_register(&cxt->pstore);
+ if (err) {
+ pr_err("registering with pstore failed\n");
--- /dev/null
+From b77fa617a2ff4d6beccad3d3d4b3a1f2d10368aa Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 1 Nov 2018 14:08:07 -0700
+Subject: pstore: Remove needless lock during console writes
+
+From: Kees Cook <keescook@chromium.org>
+
+commit b77fa617a2ff4d6beccad3d3d4b3a1f2d10368aa upstream.
+
+Since the console writer does not use the preallocated crash dump buffer
+any more, there is no reason to perform locking around it.
+
+Fixes: 70ad35db3321 ("pstore: Convert console write to use ->write_buf")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c | 29 ++++++-----------------------
+ 1 file changed, 6 insertions(+), 23 deletions(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -476,31 +476,14 @@ static void pstore_unregister_kmsg(void)
+ #ifdef CONFIG_PSTORE_CONSOLE
+ static void pstore_console_write(struct console *con, const char *s, unsigned c)
+ {
+- const char *e = s + c;
++ struct pstore_record record;
+
+- while (s < e) {
+- struct pstore_record record;
+- unsigned long flags;
++ pstore_record_init(&record, psinfo);
++ record.type = PSTORE_TYPE_CONSOLE;
+
+- pstore_record_init(&record, psinfo);
+- record.type = PSTORE_TYPE_CONSOLE;
+-
+- if (c > psinfo->bufsize)
+- c = psinfo->bufsize;
+-
+- if (oops_in_progress) {
+- if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
+- break;
+- } else {
+- spin_lock_irqsave(&psinfo->buf_lock, flags);
+- }
+- record.buf = (char *)s;
+- record.size = c;
+- psinfo->write(&record);
+- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+- s += c;
+- c = e - s;
+- }
++ record.buf = (char *)s;
++ record.size = c;
++ psinfo->write(&record);
+ }
+
+ static struct console pstore_console = {
--- /dev/null
+From a9fb94a99bb515d8720ba8440ce3aba84aec80f8 Mon Sep 17 00:00:00 2001
+From: Pi-Hsun Shih <pihsun@chromium.org>
+Date: Mon, 20 May 2019 14:51:19 +0800
+Subject: pstore: Set tfm to NULL on free_buf_for_compression
+
+From: Pi-Hsun Shih <pihsun@chromium.org>
+
+commit a9fb94a99bb515d8720ba8440ce3aba84aec80f8 upstream.
+
+Set tfm to NULL on free_buf_for_compression() after crypto_free_comp().
+
+This avoid a use-after-free when allocate_buf_for_compression()
+and free_buf_for_compression() are called twice. Although
+free_buf_for_compression() freed the tfm, allocate_buf_for_compression()
+won't reinitialize the tfm since the tfm pointer is not NULL.
+
+Fixes: 95047b0519c1 ("pstore: Refactor compression initialization")
+Signed-off-by: Pi-Hsun Shih <pihsun@chromium.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -324,8 +324,10 @@ static void allocate_buf_for_compression
+
+ static void free_buf_for_compression(void)
+ {
+- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
++ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
+ crypto_free_comp(tfm);
++ tfm = NULL;
++ }
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ big_oops_buf_sz = 0;
--- /dev/null
+From 66be4e66a7f422128748e3c3ef6ee72b20a6197b Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 3 Jun 2019 13:26:20 -0700
+Subject: rcu: locking and unlocking need to always be at least barriers
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 66be4e66a7f422128748e3c3ef6ee72b20a6197b upstream.
+
+Herbert Xu pointed out that commit bb73c52bad36 ("rcu: Don't disable
+preemption for Tiny and Tree RCU readers") was incorrect in making the
+preempt_disable/enable() be conditional on CONFIG_PREEMPT_COUNT.
+
+If CONFIG_PREEMPT_COUNT isn't enabled, the preemption enable/disable is
+a no-op, but still is a compiler barrier.
+
+And RCU locking still _needs_ that compiler barrier.
+
+It is simply fundamentally not true that RCU locking would be a complete
+no-op: we still need to guarantee (for example) that things that can
+trap and cause preemption cannot migrate into the RCU locked region.
+
+The way we do that is by making it a barrier.
+
+See for example commit 386afc91144b ("spinlocks and preemption points
+need to be at least compiler barriers") from back in 2013 that had
+similar issues with spinlocks that become no-ops on UP: they must still
+constrain the compiler from moving other operations into the critical
+region.
+
+Now, it is true that a lot of RCU operations already use READ_ONCE() and
+WRITE_ONCE() (which in practice likely would never be re-ordered wrt
+anything remotely interesting), but it is also true that that is not
+globally the case, and that it's not even necessarily always possible
+(ie bitfields etc).
+
+Reported-by: Herbert Xu <herbert@gondor.apana.org.au>
+Fixes: bb73c52bad36 ("rcu: Don't disable preemption for Tiny and Tree RCU readers")
+Cc: stable@kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/rcupdate.h | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -78,14 +78,12 @@ void synchronize_rcu(void);
+
+ static inline void __rcu_read_lock(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+- preempt_disable();
++ preempt_disable();
+ }
+
+ static inline void __rcu_read_unlock(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+- preempt_enable();
++ preempt_enable();
+ }
+
+ static inline void synchronize_rcu(void)
--- /dev/null
+From e98f09bcf3691350c589c1770532600132bad960 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sun, 9 Jun 2019 12:02:32 +0200
+Subject: Revert "MIPS: perf: ath79: Fix perfcount IRQ assignment"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit ca8648816e3dcc8dadba0e79a034f61c85eb206d which is
+commit a1e8783db8e0d58891681bc1e6d9ada66eae8e20 upstream.
+
+Petr writes:
+ Karl has reported to me today, that he's experiencing weird
+ reboot hang on his devices with 4.9.180 kernel and that he has
+ bisected it down to my backported patch.
+
+ I would like to kindly ask you for removal of this patch. This
+ patch should be reverted from all stable kernels up to 5.1,
+ because perf counters were not broken on those kernels, and this
+ patch won't work on the ath79 legacy IRQ code anyway, it needs
+ new irqchip driver which was enabled on ath79 with commit
+ 51fa4f8912c0 ("MIPS: ath79: drop legacy IRQ code").
+
+Reported-by: Petr Štetiar <ynezz@true.cz>
+Cc: Kevin 'ldir' Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
+Cc: John Crispin <john@phrozen.org>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Paul Burton <paul.burton@mips.com>
+Cc: linux-mips@vger.kernel.org
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jason Cooper <jason@lakedaemon.net>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/ath79/setup.c | 6 ++++++
+ drivers/irqchip/irq-ath79-misc.c | 11 -----------
+ 2 files changed, 6 insertions(+), 11 deletions(-)
+
+--- a/arch/mips/ath79/setup.c
++++ b/arch/mips/ath79/setup.c
+@@ -211,6 +211,12 @@ const char *get_system_type(void)
+ return ath79_sys_type;
+ }
+
++int get_c0_perfcount_int(void)
++{
++ return ATH79_MISC_IRQ(5);
++}
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
++
+ unsigned int get_c0_compare_int(void)
+ {
+ return CP0_LEGACY_COMPARE_IRQ;
+--- a/drivers/irqchip/irq-ath79-misc.c
++++ b/drivers/irqchip/irq-ath79-misc.c
+@@ -22,15 +22,6 @@
+ #define AR71XX_RESET_REG_MISC_INT_ENABLE 4
+
+ #define ATH79_MISC_IRQ_COUNT 32
+-#define ATH79_MISC_PERF_IRQ 5
+-
+-static int ath79_perfcount_irq;
+-
+-int get_c0_perfcount_int(void)
+-{
+- return ath79_perfcount_irq;
+-}
+-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+ static void ath79_misc_irq_handler(struct irq_desc *desc)
+ {
+@@ -122,8 +113,6 @@ static void __init ath79_misc_intc_domai
+ {
+ void __iomem *base = domain->host_data;
+
+- ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+-
+ /* Disable and clear all interrupts */
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
--- /dev/null
+From 962f0af83c239c0aef05639631e871c874b00f99 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Mon, 27 May 2019 18:40:19 +0200
+Subject: s390/mm: fix address space detection in exception handling
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 962f0af83c239c0aef05639631e871c874b00f99 upstream.
+
+Commit 0aaba41b58bc ("s390: remove all code using the access register
+mode") removed access register mode from the kernel, and also from the
+address space detection logic. However, user space could still switch
+to access register mode (trans_exc_code == 1), and exceptions in that
+mode would not be correctly assigned.
+
+Fix this by adding a check for trans_exc_code == 1 to get_fault_type(),
+and remove the wrong comment line before that function.
+
+Fixes: 0aaba41b58bc ("s390: remove all code using the access register mode")
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: <stable@vger.kernel.org> # v4.15+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/mm/fault.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -107,7 +107,6 @@ void bust_spinlocks(int yes)
+
+ /*
+ * Find out which address space caused the exception.
+- * Access register mode is impossible, ignore space == 3.
+ */
+ static inline enum fault_type get_fault_type(struct pt_regs *regs)
+ {
+@@ -132,6 +131,10 @@ static inline enum fault_type get_fault_
+ }
+ return VDSO_FAULT;
+ }
++ if (trans_exc_code == 1) {
++ /* access register mode, not used in the kernel */
++ return USER_FAULT;
++ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
+ }
ipv6-use-read_once-for-inet-hdrincl-as-in-ipv4.patch
ipv6-fix-efault-on-sendto-with-icmpv6-and-hdrincl.patch
mtd-spinand-macronix-fix-ecc-status-read.patch
+rcu-locking-and-unlocking-need-to-always-be-at-least-barriers.patch
+parisc-use-implicit-space-register-selection-for-loading-the-coherence-index-of-i-o-pdirs.patch
+nfsv4.1-again-fix-a-race-where-cb_notify_lock-fails-to-wake-a-waiter.patch
+nfsv4.1-fix-bug-only-first-cb_notify_lock-is-handled.patch
+fuse-fallocate-fix-return-with-locked-inode.patch
+pstore-remove-needless-lock-during-console-writes.patch
+pstore-convert-buf_lock-to-semaphore.patch
+pstore-set-tfm-to-null-on-free_buf_for_compression.patch
+pstore-ram-run-without-kernel-crash-dump-region.patch
+x86-power-fix-nosmt-vs-hibernation-triple-fault-during-resume.patch
+x86-insn-eval-fix-use-after-free-access-to-ldt-entry.patch
+i2c-xiic-add-max_read_len-quirk.patch
+s390-mm-fix-address-space-detection-in-exception-handling.patch
+xen-blkfront-switch-kcalloc-to-kvcalloc-for-large-array-allocation.patch
+mips-bounds-check-virt_addr_valid.patch
+mips-pistachio-build-uimage.gz-by-default.patch
+revert-mips-perf-ath79-fix-perfcount-irq-assignment.patch
+genwqe-prevent-an-integer-overflow-in-the-ioctl.patch
+test_firmware-use-correct-snprintf-limit.patch
+drm-gma500-cdv-check-vbt-config-bits-when-detecting-lvds-panels.patch
+drm-msm-fix-fb-references-in-async-update.patch
--- /dev/null
+From bd17cc5a20ae9aaa3ed775f360b75ff93cd66a1d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 15 May 2019 12:33:22 +0300
+Subject: test_firmware: Use correct snprintf() limit
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit bd17cc5a20ae9aaa3ed775f360b75ff93cd66a1d upstream.
+
+The limit here is supposed to be how much of the page is left, but it's
+just using PAGE_SIZE as the limit.
+
+The other thing to remember is that snprintf() returns the number of
+bytes which would have been copied if we had had enough room. So that
+means that if we run out of space then this code would end up passing a
+negative value as the limit and the kernel would print an error message.
+I have changed the code to use scnprintf() which returns the number of
+bytes that were successfully printed (not counting the NUL terminator).
+
+Fixes: c92316bf8e94 ("test_firmware: add batched firmware tests")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_firmware.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -223,30 +223,30 @@ static ssize_t config_show(struct device
+
+ mutex_lock(&test_fw_mutex);
+
+- len += snprintf(buf, PAGE_SIZE,
++ len += scnprintf(buf, PAGE_SIZE - len,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ if (test_fw_config->name)
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "name:\t%s\n",
+ test_fw_config->name);
+ else
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "name:\tEMTPY\n");
+
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "num_requests:\t%u\n", test_fw_config->num_requests);
+
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "send_uevent:\t\t%s\n",
+ test_fw_config->send_uevent ?
+ "FW_ACTION_HOTPLUG" :
+ "FW_ACTION_NOHOTPLUG");
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "sync_direct:\t\t%s\n",
+ test_fw_config->sync_direct ? "true" : "false");
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+
+ mutex_unlock(&test_fw_mutex);
--- /dev/null
+From de9f869616dd95e95c00bdd6b0fcd3421e8a4323 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Sun, 2 Jun 2019 03:15:58 +0200
+Subject: x86/insn-eval: Fix use-after-free access to LDT entry
+
+From: Jann Horn <jannh@google.com>
+
+commit de9f869616dd95e95c00bdd6b0fcd3421e8a4323 upstream.
+
+get_desc() computes a pointer into the LDT while holding a lock that
+protects the LDT from being freed, but then drops the lock and returns the
+(now potentially dangling) pointer to its caller.
+
+Fix it by giving the caller a copy of the LDT entry instead.
+
+Fixes: 670f928ba09b ("x86/insn-eval: Add utility function to get segment descriptor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/insn-eval.c | 47 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 24 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/lib/insn-eval.c
++++ b/arch/x86/lib/insn-eval.c
+@@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn
+ }
+
+ /**
+- * get_desc() - Obtain pointer to a segment descriptor
++ * get_desc() - Obtain contents of a segment descriptor
++ * @out: Segment descriptor contents on success
+ * @sel: Segment selector
+ *
+ * Given a segment selector, obtain a pointer to the segment descriptor.
+@@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn
+ *
+ * Returns:
+ *
+- * Pointer to segment descriptor on success.
++ * True on success, false on failure.
+ *
+ * NULL on error.
+ */
+-static struct desc_struct *get_desc(unsigned short sel)
++static bool get_desc(struct desc_struct *out, unsigned short sel)
+ {
+ struct desc_ptr gdt_desc = {0, 0};
+ unsigned long desc_base;
+
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+- struct desc_struct *desc = NULL;
++ bool success = false;
+ struct ldt_struct *ldt;
+
+ /* Bits [15:3] contain the index of the desired entry. */
+@@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsi
+
+ mutex_lock(¤t->active_mm->context.lock);
+ ldt = current->active_mm->context.ldt;
+- if (ldt && sel < ldt->nr_entries)
+- desc = &ldt->entries[sel];
++ if (ldt && sel < ldt->nr_entries) {
++ *out = ldt->entries[sel];
++ success = true;
++ }
+
+ mutex_unlock(¤t->active_mm->context.lock);
+
+- return desc;
++ return success;
+ }
+ #endif
+ native_store_gdt(&gdt_desc);
+@@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsi
+ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
+
+ if (desc_base > gdt_desc.size)
+- return NULL;
++ return false;
+
+- return (struct desc_struct *)(gdt_desc.address + desc_base);
++ *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
++ return true;
+ }
+
+ /**
+@@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsi
+ */
+ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ short sel;
+
+ sel = get_segment_selector(regs, seg_reg_idx);
+@@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct p
+ if (!sel)
+ return -1L;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return -1L;
+
+- return get_desc_base(desc);
++ return get_desc_base(&desc);
+ }
+
+ /**
+@@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct p
+ */
+ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ unsigned long limit;
+ short sel;
+
+@@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struc
+ if (!sel)
+ return 0;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return 0;
+
+ /*
+@@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struc
+ * not tested when checking the segment limits. In practice,
+ * this means that the segment ends in (limit << 12) + 0xfff.
+ */
+- limit = get_desc_limit(desc);
+- if (desc->g)
++ limit = get_desc_limit(&desc);
++ if (desc.g)
+ limit = (limit << 12) + 0xfff;
+
+ return limit;
+@@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struc
+ */
+ int insn_get_code_seg_params(struct pt_regs *regs)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ short sel;
+
+ if (v8086_mode(regs))
+@@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_r
+ if (sel < 0)
+ return sel;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return -EINVAL;
+
+ /*
+@@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_r
+ * determines whether a segment contains data or code. If this is a data
+ * segment, return error.
+ */
+- if (!(desc->type & BIT(3)))
++ if (!(desc.type & BIT(3)))
+ return -EINVAL;
+
+- switch ((desc->l << 1) | desc->d) {
++ switch ((desc.l << 1) | desc.d) {
+ case 0: /*
+ * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
+ * both 16-bit.
--- /dev/null
+From ec527c318036a65a083ef68d8ba95789d2212246 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 30 May 2019 00:09:39 +0200
+Subject: x86/power: Fix 'nosmt' vs hibernation triple fault during resume
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit ec527c318036a65a083ef68d8ba95789d2212246 upstream.
+
+As explained in
+
+ 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+
+we always, no matter what, have to bring up x86 HT siblings during boot at
+least once in order to avoid first MCE bringing the system to its knees.
+
+That means that whenever 'nosmt' is supplied on the kernel command-line,
+all the HT siblings are as a result sitting in mwait or cpuidle after
+going through the online-offline cycle at least once.
+
+This causes a serious issue though when a kernel, which saw 'nosmt' on its
+commandline, is going to perform resume from hibernation: if the resume
+from the hibernated image is successful, cr3 is flipped in order to point
+to the address space of the kernel that is being resumed, which in turn
+means that all the HT siblings are all of a sudden mwaiting on address
+which is no longer valid.
+
+That results in triple fault shortly after cr3 is switched, and machine
+reboots.
+
+Fix this by always waking up all the SMT siblings before initiating the
+'restore from hibernation' process; this guarantees that all the HT
+siblings will be properly carried over to the resumed kernel waiting in
+resume_play_dead(), and acted upon accordingly afterwards, based on the
+target kernel configuration.
+
+Symmetrically, the resumed kernel has to push the SMT siblings to mwait
+again in case it has SMT disabled; this means it has to online all
+the siblings when resuming (so that they come out of hlt) and offline
+them again to let them reach mwait.
+
+Cc: 4.19+ <stable@vger.kernel.org> # v4.19+
+Debugged-by: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/power/cpu.c | 10 ++++++++++
+ arch/x86/power/hibernate_64.c | 33 +++++++++++++++++++++++++++++++++
+ include/linux/cpu.h | 4 ++++
+ kernel/cpu.c | 4 ++--
+ kernel/power/hibernate.c | 9 +++++++++
+ 5 files changed, 58 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable
+ * address in its instruction pointer may not be possible to resolve
+ * any more at that point (the page tables used by it previously may
+ * have been overwritten by hibernate image data).
++ *
++ * First, make sure that we wake up all the potentially disabled SMT
++ * threads which have been initially brought up and then put into
++ * mwait/cpuidle sleep.
++ * Those will be put to proper (not interfering with hibernation
++ * resume) sleep afterwards, and the resumed kernel will decide itself
++ * what to do with them.
+ */
++ ret = cpuhp_smt_enable();
++ if (ret)
++ return ret;
+ smp_ops.play_dead = resume_play_dead;
+ ret = disable_nonboot_cpus();
+ smp_ops.play_dead = play_dead;
+--- a/arch/x86/power/hibernate_64.c
++++ b/arch/x86/power/hibernate_64.c
+@@ -13,6 +13,7 @@
+ #include <linux/suspend.h>
+ #include <linux/scatterlist.h>
+ #include <linux/kdebug.h>
++#include <linux/cpu.h>
+
+ #include <crypto/hash.h>
+
+@@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void
+
+ return 0;
+ }
++
++int arch_resume_nosmt(void)
++{
++ int ret = 0;
++ /*
++ * We reached this while coming out of hibernation. This means
++ * that SMT siblings are sleeping in hlt, as mwait is not safe
++ * against control transition during resume (see comment in
++ * hibernate_resume_nonboot_cpu_disable()).
++ *
++ * If the resumed kernel has SMT disabled, we have to take all the
++ * SMT siblings out of hlt, and offline them again so that they
++ * end up in mwait proper.
++ *
++ * Called with hotplug disabled.
++ */
++ cpu_hotplug_enable();
++ if (cpu_smt_control == CPU_SMT_DISABLED ||
++ cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
++ enum cpuhp_smt_control old = cpu_smt_control;
++
++ ret = cpuhp_smt_enable();
++ if (ret)
++ goto out;
++ ret = cpuhp_smt_disable(old);
++ if (ret)
++ goto out;
++ }
++out:
++ cpu_hotplug_disable();
++ return ret;
++}
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -183,10 +183,14 @@ enum cpuhp_smt_control {
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
+ extern void cpu_smt_check_topology(void);
++extern int cpuhp_smt_enable(void);
++extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
+ static inline void cpu_smt_check_topology(void) { }
++static inline int cpuhp_smt_enable(void) { return 0; }
++static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+ #endif
+
+ /*
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2035,7 +2035,7 @@ static void cpuhp_online_cpu_device(unsi
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ }
+
+-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
++int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+
+@@ -2069,7 +2069,7 @@ static int cpuhp_smt_disable(enum cpuhp_
+ return ret;
+ }
+
+-static int cpuhp_smt_enable(void)
++int cpuhp_smt_enable(void)
+ {
+ int cpu, ret = 0;
+
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -258,6 +258,11 @@ void swsusp_show_speed(ktime_t start, kt
+ (kps % 1000) / 10);
+ }
+
++__weak int arch_resume_nosmt(void)
++{
++ return 0;
++}
++
+ /**
+ * create_image - Create a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+@@ -325,6 +330,10 @@ static int create_image(int platform_mod
+ Enable_cpus:
+ enable_nonboot_cpus();
+
++ /* Allow architectures to do nosmt-specific post-resume dances */
++ if (!in_suspend)
++ error = arch_resume_nosmt();
++
+ Platform_finish:
+ platform_finish(platform_mode);
+
--- /dev/null
+From 1d5c76e66433382a1e170d1d5845bb0fed7467aa Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Fri, 3 May 2019 17:04:01 +0200
+Subject: xen-blkfront: switch kcalloc to kvcalloc for large array allocation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 1d5c76e66433382a1e170d1d5845bb0fed7467aa upstream.
+
+There's no reason to request physically contiguous memory for those
+allocations.
+
+[boris: added CC to stable]
+
+Cc: stable@vger.kernel.org
+Reported-by: Ian Jackson <ian.jackson@citrix.com>
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/xen-blkfront.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfr
+ }
+
+ free_shadow:
+- kfree(rinfo->shadow[i].grants_used);
++ kvfree(rinfo->shadow[i].grants_used);
+ rinfo->shadow[i].grants_used = NULL;
+- kfree(rinfo->shadow[i].indirect_grants);
++ kvfree(rinfo->shadow[i].indirect_grants);
+ rinfo->shadow[i].indirect_grants = NULL;
+- kfree(rinfo->shadow[i].sg);
++ kvfree(rinfo->shadow[i].sg);
+ rinfo->shadow[i].sg = NULL;
+ }
+
+@@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_i
+ for (i = 0; i < info->nr_rings; i++)
+ blkif_free_ring(&info->rinfo[i]);
+
+- kfree(info->rinfo);
++ kvfree(info->rinfo);
+ info->rinfo = NULL;
+ info->nr_rings = 0;
+ }
+@@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_
+ if (!info->nr_rings)
+ info->nr_rings = 1;
+
+- info->rinfo = kcalloc(info->nr_rings,
+- sizeof(struct blkfront_ring_info),
+- GFP_KERNEL);
++ info->rinfo = kvcalloc(info->nr_rings,
++ sizeof(struct blkfront_ring_info),
++ GFP_KERNEL);
+ if (!info->rinfo) {
+ xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
+@@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struc
+
+ for (i = 0; i < BLK_RING_SIZE(info); i++) {
+ rinfo->shadow[i].grants_used =
+- kcalloc(grants,
+- sizeof(rinfo->shadow[i].grants_used[0]),
+- GFP_NOIO);
+- rinfo->shadow[i].sg = kcalloc(psegs,
+- sizeof(rinfo->shadow[i].sg[0]),
+- GFP_NOIO);
++ kvcalloc(grants,
++ sizeof(rinfo->shadow[i].grants_used[0]),
++ GFP_NOIO);
++ rinfo->shadow[i].sg = kvcalloc(psegs,
++ sizeof(rinfo->shadow[i].sg[0]),
++ GFP_NOIO);
+ if (info->max_indirect_segments)
+ rinfo->shadow[i].indirect_grants =
+- kcalloc(INDIRECT_GREFS(grants),
+- sizeof(rinfo->shadow[i].indirect_grants[0]),
+- GFP_NOIO);
++ kvcalloc(INDIRECT_GREFS(grants),
++ sizeof(rinfo->shadow[i].indirect_grants[0]),
++ GFP_NOIO);
+ if ((rinfo->shadow[i].grants_used == NULL) ||
+ (rinfo->shadow[i].sg == NULL) ||
+ (info->max_indirect_segments &&
+@@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struc
+
+ out_of_memory:
+ for (i = 0; i < BLK_RING_SIZE(info); i++) {
+- kfree(rinfo->shadow[i].grants_used);
++ kvfree(rinfo->shadow[i].grants_used);
+ rinfo->shadow[i].grants_used = NULL;
+- kfree(rinfo->shadow[i].sg);
++ kvfree(rinfo->shadow[i].sg);
+ rinfo->shadow[i].sg = NULL;
+- kfree(rinfo->shadow[i].indirect_grants);
++ kvfree(rinfo->shadow[i].indirect_grants);
+ rinfo->shadow[i].indirect_grants = NULL;
+ }
+ if (!list_empty(&rinfo->indirect_pages)) {