--- /dev/null
+From a8c715b4dd73c26a81a9cc8dc792aa715d8b4bb2 Mon Sep 17 00:00:00 2001
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Date: Mon, 13 May 2019 20:28:00 +0300
+Subject: ARC: mm: SIGSEGV userspace trying to access kernel virtual memory
+
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+
+commit a8c715b4dd73c26a81a9cc8dc792aa715d8b4bb2 upstream.
+
+As of today if userspace process tries to access a kernel virtual address
+(0x7000_0000 to 0x7fff_ffff) such that a legit kernel mapping already
+exists, that process hangs instead of being killed with SIGSEGV
+
+Fix that by ensuring that do_page_fault() handles kernel vaddr only if
+in kernel mode.
+
+And given this, we can also simplify the code a bit. Now a vmalloc fault
+implies kernel mode so its failure (for some reason) can reuse the
+@no_context label and we can remove @bad_area_nosemaphore.
+
+Reproduce user test for original problem:
+
+------------------------>8-----------------
+ #include <stdlib.h>
+ #include <stdint.h>
+
+ int main(int argc, char *argv[])
+ {
+ volatile uint32_t temp;
+
+ temp = *(uint32_t *)(0x70000000);
+ }
+------------------------>8-----------------
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/mm/fault.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+- int si_code = 0;
++ int si_code = SEGV_MAPERR;
+ int ret;
+ vm_fault_t fault;
+ int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
+@@ -81,16 +81,14 @@ void do_page_fault(unsigned long address
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+- if (address >= VMALLOC_START) {
++ if (address >= VMALLOC_START && !user_mode(regs)) {
+ ret = handle_kernel_vaddr_fault(address);
+ if (unlikely(ret))
+- goto bad_area_nosemaphore;
++ goto no_context;
+ else
+ return;
+ }
+
+- si_code = SEGV_MAPERR;
+-
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+@@ -198,7 +196,6 @@ good_area:
+ bad_area:
+ up_read(&mm->mmap_sem);
+
+-bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.fault_address = address;
--- /dev/null
+From 7c420636860a719049fae9403e2c87804f53bdde Mon Sep 17 00:00:00 2001
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Date: Tue, 16 Apr 2019 13:46:07 +0200
+Subject: drm/gma500/cdv: Check vbt config bits when detecting lvds panels
+
+From: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+
+commit 7c420636860a719049fae9403e2c87804f53bdde upstream.
+
+Some machines have an lvds child device in vbt even though a panel is
+not attached. To make detection more reliable we now also check the lvds
+config bits available in the vbt.
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1665766
+Cc: stable@vger.kernel.org
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190416114607.1072-1-patrik.r.jakobsson@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/gma500/cdv_intel_lvds.c | 3 +++
+ drivers/gpu/drm/gma500/intel_bios.c | 3 +++
+ drivers/gpu/drm/gma500/psb_drv.h | 1 +
+ 3 files changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_devi
+ int pipe;
+ u8 pin;
+
++ if (!dev_priv->lvds_enabled_in_vbt)
++ return;
++
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+--- a/drivers/gpu/drm/gma500/intel_bios.c
++++ b/drivers/gpu/drm/gma500/intel_bios.c
+@@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_pri
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
+
++ dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
++ DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
++
+ /* This bit means to use 96Mhz for DPLL_A or not */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -537,6 +537,7 @@ struct drm_psb_private {
+ int lvds_ssc_freq;
+ bool is_lvds_on;
+ bool is_mipi_on;
++ bool lvds_enabled_in_vbt;
+ u32 mipi_ctrl_display;
+
+ unsigned int core_freq;
--- /dev/null
+From 474d952b4870cfbdc55d3498f4d498775fe77e81 Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 3 Jun 2019 13:56:08 -0300
+Subject: drm/msm: fix fb references in async update
+
+From: Helen Koike <helen.koike@collabora.com>
+
+commit 474d952b4870cfbdc55d3498f4d498775fe77e81 upstream.
+
+Async update callbacks are expected to set the old_fb in the new_state
+so prepare/cleanup framebuffers are balanced.
+
+Cc: <stable@vger.kernel.org> # v4.14+
+Fixes: 224a4c970987 ("drm/msm: update cursors asynchronously through atomic")
+Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Acked-by: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190603165610.24614-4-helen.koike@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check
+ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+ {
++ struct drm_framebuffer *old_fb = plane->state->fb;
++
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_upda
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
++
++ new_state->fb = old_fb;
+ }
+
+ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
--- /dev/null
+From d985a3533274ef7dd1ccb25cb05a72259b25268f Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 3 Jun 2019 13:56:06 -0300
+Subject: drm/rockchip: fix fb references in async update
+
+From: Helen Koike <helen.koike@collabora.com>
+
+commit d985a3533274ef7dd1ccb25cb05a72259b25268f upstream.
+
+In the case of async update, modifications are done in place, i.e. in the
+current plane state, so the new_state is prepared and the new_state is
+cleaned up (instead of the old_state, unlike what happens in a
+normal sync update).
+To cleanup the old_fb properly, it needs to be placed in the new_state
+at the end of async_update, so the cleanup call will unreference the
+old_fb correctly.
+
+Also, the previous code had a:
+
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ ...
+ swap(plane_state, plane->state);
+
+ if (plane->state->fb && plane->state->fb != new_state->fb) {
+ ...
+ }
+
+Which was wrong, as the fb were just assigned to be equal, so this if
+statement never evaluates to true.
+
+Another detail is that the function drm_crtc_vblank_get() can only be
+called when vop->is_enabled is true, otherwise it has no effect and
+throws a WARN_ON().
+
+Calling drm_atomic_set_fb_for_plane() (which gets a reference to the new
+fb and puts the old fb) is not required, as it is taken care of by
+drm_mode_cursor_universal() when calling
+drm_atomic_helper_update_plane().
+
+Fixes: 15609559a834 ("drm/rockchip: update cursors asynchronously through atomic.")
+Cc: <stable@vger.kernel.org> # v4.20+
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190603165610.24614-2-helen.koike@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 49 ++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -924,29 +924,17 @@ static void vop_plane_atomic_async_updat
+ struct drm_plane_state *new_state)
+ {
+ struct vop *vop = to_vop(plane->state->crtc);
+- struct drm_plane_state *plane_state;
++ struct drm_framebuffer *old_fb = plane->state->fb;
+
+- plane_state = plane->funcs->atomic_duplicate_state(plane);
+- plane_state->crtc_x = new_state->crtc_x;
+- plane_state->crtc_y = new_state->crtc_y;
+- plane_state->crtc_h = new_state->crtc_h;
+- plane_state->crtc_w = new_state->crtc_w;
+- plane_state->src_x = new_state->src_x;
+- plane_state->src_y = new_state->src_y;
+- plane_state->src_h = new_state->src_h;
+- plane_state->src_w = new_state->src_w;
+-
+- if (plane_state->fb != new_state->fb)
+- drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
+-
+- swap(plane_state, plane->state);
+-
+- if (plane->state->fb && plane->state->fb != new_state->fb) {
+- drm_framebuffer_get(plane->state->fb);
+- WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+- drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
+- set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+- }
++ plane->state->crtc_x = new_state->crtc_x;
++ plane->state->crtc_y = new_state->crtc_y;
++ plane->state->crtc_h = new_state->crtc_h;
++ plane->state->crtc_w = new_state->crtc_w;
++ plane->state->src_x = new_state->src_x;
++ plane->state->src_y = new_state->src_y;
++ plane->state->src_h = new_state->src_h;
++ plane->state->src_w = new_state->src_w;
++ swap(plane->state->fb, new_state->fb);
+
+ if (vop->is_enabled) {
+ rockchip_drm_psr_inhibit_get_state(new_state->state);
+@@ -955,9 +943,22 @@ static void vop_plane_atomic_async_updat
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+ rockchip_drm_psr_inhibit_put_state(new_state->state);
+- }
+
+- plane->funcs->atomic_destroy_state(plane, plane_state);
++ /*
++ * A scanout can still be occurring, so we can't drop the
++ * reference to the old framebuffer. To solve this we get a
++ * reference to old_fb and set a worker to release it later.
++ * FIXME: if we perform 500 async_update calls before the
++ * vblank, then we can have 500 different framebuffers waiting
++ * to be released.
++ */
++ if (old_fb && plane->state->fb != old_fb) {
++ drm_framebuffer_get(old_fb);
++ WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
++ drm_flip_work_queue(&vop->fb_unref_work, old_fb);
++ set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
++ }
++ }
+ }
+
+ static const struct drm_plane_helper_funcs plane_helper_funcs = {
--- /dev/null
+From c16b85559dcfb5a348cc085a7b4c75ed49b05e2c Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 3 Jun 2019 13:56:09 -0300
+Subject: drm/vc4: fix fb references in async update
+
+From: Helen Koike <helen.koike@collabora.com>
+
+commit c16b85559dcfb5a348cc085a7b4c75ed49b05e2c upstream.
+
+Async update callbacks are expected to set the old_fb in the new_state
+so prepare/cleanup framebuffers are balanced.
+
+Calling drm_atomic_set_fb_for_plane() (which gets a reference of the new
+fb and put the old fb) is not required, as it's taken care by
+drm_mode_cursor_universal() when calling drm_atomic_helper_update_plane().
+
+Cc: <stable@vger.kernel.org> # v4.19+
+Fixes: 539c320bfa97 ("drm/vc4: update cursors asynchronously through atomic")
+Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190603165610.24614-5-helen.koike@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vc4/vc4_plane.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -968,7 +968,7 @@ static void vc4_plane_atomic_async_updat
+ {
+ struct vc4_plane_state *vc4_state, *new_vc4_state;
+
+- drm_atomic_set_fb_for_plane(plane->state, state->fb);
++ swap(plane->state->fb, state->fb);
+ plane->state->crtc_x = state->crtc_x;
+ plane->state->crtc_y = state->crtc_y;
+ plane->state->crtc_w = state->crtc_w;
--- /dev/null
+From 35d6fcbb7c3e296a52136347346a698a35af3fda Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Mon, 27 May 2019 11:42:07 +0200
+Subject: fuse: fallocate: fix return with locked inode
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 35d6fcbb7c3e296a52136347346a698a35af3fda upstream.
+
+Do the proper cleanup in case the size check fails.
+
+Tested with xfstests:generic/228
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 0cbade024ba5 ("fuse: honor RLIMIT_FSIZE in fuse_file_fallocate")
+Cc: Liu Bo <bo.liu@linux.alibaba.com>
+Cc: <stable@vger.kernel.org> # v3.5
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3050,7 +3050,7 @@ static long fuse_file_fallocate(struct f
+ offset + length > i_size_read(inode)) {
+ err = inode_newsize_ok(inode, offset + length);
+ if (err)
+- return err;
++ goto out;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
--- /dev/null
+From a2bc92362941006830afa3dfad6caec1f99acbf5 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 28 May 2019 13:22:50 +0200
+Subject: fuse: fix copy_file_range() in the writeback case
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit a2bc92362941006830afa3dfad6caec1f99acbf5 upstream.
+
+Prior to sending COPY_FILE_RANGE to userspace filesystem, we must flush all
+dirty pages in both the source and destination files.
+
+This patch adds the missing flush of the source file.
+
+Tested on libfuse-3.5.0 with:
+
+ libfuse/example/passthrough_ll /mnt/fuse/ -o writeback
+ libfuse/test/test_syscalls /mnt/fuse/tmp/test
+
+Fixes: 88bc7d5097a1 ("fuse: add support for copy_file_range()")
+Cc: <stable@vger.kernel.org> # v4.20
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3098,6 +3098,7 @@ static ssize_t fuse_copy_file_range(stru
+ {
+ struct fuse_file *ff_in = file_in->private_data;
+ struct fuse_file *ff_out = file_out->private_data;
++ struct inode *inode_in = file_inode(file_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct fuse_inode *fi_out = get_fuse_inode(inode_out);
+ struct fuse_conn *fc = ff_in->fc;
+@@ -3121,6 +3122,17 @@ static ssize_t fuse_copy_file_range(stru
+ if (fc->no_copy_file_range)
+ return -EOPNOTSUPP;
+
++ if (fc->writeback_cache) {
++ inode_lock(inode_in);
++ err = filemap_write_and_wait_range(inode_in->i_mapping,
++ pos_in, pos_in + len);
++ if (!err)
++ fuse_sync_writes(inode_in);
++ inode_unlock(inode_in);
++ if (err)
++ return err;
++ }
++
+ inode_lock(inode_out);
+
+ if (fc->writeback_cache) {
--- /dev/null
+From 110080cea0d0e4dfdb0b536e7f8a5633ead6a781 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 7 May 2019 11:36:34 +0300
+Subject: genwqe: Prevent an integer overflow in the ioctl
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 110080cea0d0e4dfdb0b536e7f8a5633ead6a781 upstream.
+
+There are a couple potential integer overflows here.
+
+ round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+
+The first thing is that the "m->size + (...)" addition could overflow,
+and the second is that round_up() overflows to zero if the result is
+within PAGE_SIZE of the type max.
+
+In this code, the "m->size" variable is an u64 but we're saving the
+result in "map_size" which is an unsigned long and genwqe_user_vmap()
+takes an unsigned long as well. So I have used ULONG_MAX as the upper
+bound. From a practical perspective unsigned long is fine/better than
+trying to change all the types to u64.
+
+Fixes: eaf4722d4645 ("GenWQE Character device and DDCB queue")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/genwqe/card_dev.c | 2 ++
+ drivers/misc/genwqe/card_utils.c | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/misc/genwqe/card_dev.c
++++ b/drivers/misc/genwqe/card_dev.c
+@@ -780,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_
+
+ if ((m->addr == 0x0) || (m->size == 0))
+ return -EINVAL;
++ if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
++ return -EINVAL;
+
+ map_addr = (m->addr & PAGE_MASK);
+ map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -586,6 +586,10 @@ int genwqe_user_vmap(struct genwqe_dev *
+ /* determine space needed for page_list. */
+ data = (unsigned long)uaddr;
+ offs = offset_in_page(data);
++ if (size > ULONG_MAX - PAGE_SIZE - offs) {
++ m->size = 0; /* mark unused and not added */
++ return -EINVAL;
++ }
+ m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
+
+ m->page_list = kcalloc(m->nr_pages,
--- /dev/null
+From 8438846cce61e284a22316c13aa4b63772963070 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Sat, 4 May 2019 15:56:08 +0200
+Subject: habanalabs: fix debugfs code
+
+From: Jann Horn <jannh@google.com>
+
+commit 8438846cce61e284a22316c13aa4b63772963070 upstream.
+
+This fixes multiple things in the habanalabs debugfs code, in particular:
+
+ - mmu_write() was unnecessarily verbose, copying around between multiple
+ buffers
+ - mmu_write() could write a user-specified, unbounded amount of userspace
+ memory into a kernel buffer (out-of-bounds write)
+ - multiple debugfs read handlers ignored the user-supplied count,
+ potentially corrupting out-of-bounds userspace data
+ - hl_device_read() was unnecessarily verbose
+ - hl_device_write() could read uninitialized stack memory
+ - multiple debugfs read handlers copied terminating null characters to
+ userspace
+
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
+Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/habanalabs/debugfs.c | 60 +++++++++++---------------------------
+ 1 file changed, 18 insertions(+), 42 deletions(-)
+
+--- a/drivers/misc/habanalabs/debugfs.c
++++ b/drivers/misc/habanalabs/debugfs.c
+@@ -459,41 +459,31 @@ static ssize_t mmu_write(struct file *fi
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+- char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
+- addr_kbuf[MMU_ADDR_BUF_SIZE];
++ char kbuf[MMU_KBUF_SIZE];
+ char *c;
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+- memset(kbuf, 0, sizeof(kbuf));
+- memset(asid_kbuf, 0, sizeof(asid_kbuf));
+- memset(addr_kbuf, 0, sizeof(addr_kbuf));
+-
++ if (count > sizeof(kbuf) - 1)
++ goto err;
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+-
+- kbuf[MMU_KBUF_SIZE - 1] = 0;
++ kbuf[count] = 0;
+
+ c = strchr(kbuf, ' ');
+ if (!c)
+ goto err;
++ *c = '\0';
+
+- memcpy(asid_kbuf, kbuf, c - kbuf);
+-
+- rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
++ rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
+ if (rc)
+ goto err;
+
+- c = strstr(kbuf, " 0x");
+- if (!c)
++ if (strncmp(c+1, "0x", 2))
+ goto err;
+-
+- c += 3;
+- memcpy(addr_kbuf, c, (kbuf + count) - c);
+-
+- rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
++ rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
+ if (rc)
+ goto err;
+
+@@ -525,10 +515,8 @@ static ssize_t hl_data_read32(struct fil
+ }
+
+ sprintf(tmp_buf, "0x%08x\n", val);
+- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+- strlen(tmp_buf) + 1);
+-
+- return rc;
++ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
++ strlen(tmp_buf));
+ }
+
+ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
+@@ -559,7 +547,6 @@ static ssize_t hl_get_power_state(struct
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+- ssize_t rc;
+ int i;
+
+ if (*ppos)
+@@ -574,10 +561,8 @@ static ssize_t hl_get_power_state(struct
+
+ sprintf(tmp_buf,
+ "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
+- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+- strlen(tmp_buf) + 1);
+-
+- return rc;
++ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
++ strlen(tmp_buf));
+ }
+
+ static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
+@@ -630,8 +615,8 @@ static ssize_t hl_i2c_data_read(struct f
+ }
+
+ sprintf(tmp_buf, "0x%02x\n", val);
+- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+- strlen(tmp_buf) + 1);
++ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
++ strlen(tmp_buf));
+
+ return rc;
+ }
+@@ -720,18 +705,9 @@ static ssize_t hl_led2_write(struct file
+ static ssize_t hl_device_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+- char tmp_buf[200];
+- ssize_t rc;
+-
+- if (*ppos)
+- return 0;
+-
+- sprintf(tmp_buf,
+- "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+- strlen(tmp_buf) + 1);
+-
+- return rc;
++ static const char *help =
++ "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
++ return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
+ }
+
+ static ssize_t hl_device_write(struct file *f, const char __user *buf,
+@@ -739,7 +715,7 @@ static ssize_t hl_device_write(struct fi
+ {
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+- char data[30];
++ char data[30] = {0};
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
--- /dev/null
+From 49b809586730a77b57ce620b2f9689de765d790b Mon Sep 17 00:00:00 2001
+From: Robert Hancock <hancock@sedsystems.ca>
+Date: Tue, 4 Jun 2019 15:55:51 -0600
+Subject: i2c: xiic: Add max_read_len quirk
+
+From: Robert Hancock <hancock@sedsystems.ca>
+
+commit 49b809586730a77b57ce620b2f9689de765d790b upstream.
+
+This driver does not support reading more than 255 bytes at once because
+the register for storing the number of bytes to read is only 8 bits. Add
+a max_read_len quirk to enforce this.
+
+This was found when using this driver with the SFP driver, which was
+previously reading all 256 bytes in the SFP EEPROM in one transaction.
+This caused a bunch of hard-to-debug errors in the xiic driver since the
+driver/logic was treating the number of bytes to read as zero.
+Rejecting transactions that aren't supported at least allows the problem
+to be diagnosed more easily.
+
+Signed-off-by: Robert Hancock <hancock@sedsystems.ca>
+Reviewed-by: Michal Simek <michal.simek@xilinx.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-xiic.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_a
+ .functionality = xiic_func,
+ };
+
++static const struct i2c_adapter_quirks xiic_quirks = {
++ .max_read_len = 255,
++};
++
+ static const struct i2c_adapter xiic_adapter = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &xiic_algorithm,
++ .quirks = &xiic_quirks,
+ };
+
+
--- /dev/null
+From 913ab9780fc021298949cc5514d6255a008e69f9 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+Date: Thu, 6 Jun 2019 13:13:58 +0900
+Subject: kbuild: use more portable 'command -v' for cc-cross-prefix
+
+From: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+commit 913ab9780fc021298949cc5514d6255a008e69f9 upstream.
+
+To print the pathname that will be used by shell in the current
+environment, 'command -v' is a standardized way. [1]
+
+'which' is also often used in scripts, but it is less portable.
+
+When I worked on commit bd55f96fa9fc ("kbuild: refactor cc-cross-prefix
+implementation"), I was eager to use 'command -v' but it did not work.
+(The reason is explained below.)
+
+I kept 'which' as before but got rid of '> /dev/null 2>&1' as I
+thought it was no longer needed. Sorry, I was wrong.
+
+It works well on my Ubuntu machine, but Alexey Brodkin reports noisy
+warnings on CentOS7 when 'which' fails to find the given command in
+the PATH environment.
+
+ $ which foo
+ which: no foo in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin)
+
+Given that behavior of 'which' depends on system (and it may not be
+installed by default), I want to try 'command -v' once again.
+
+The specification [1] clearly describes the behavior of 'command -v'
+when the given command is not found:
+
+ Otherwise, no output shall be written and the exit status shall reflect
+ that the name was not found.
+
+However, we need a little magic to use 'command -v' from Make.
+
+$(shell ...) passes the argument to a subshell for execution, and
+returns the standard output of the command.
+
+Here is a trick. GNU Make may optimize this by executing the command
+directly instead of forking a subshell, if no shell special characters
+are found in the command and omitting the subshell will not change the
+behavior.
+
+In this case, no shell special character is used. So, Make will try
+to run it directly. However, 'command' is a shell-builtin command,
+then Make would fail to find it in the PATH environment:
+
+ $ make ARCH=m68k defconfig
+ make: command: Command not found
+ make: command: Command not found
+ make: command: Command not found
+
+In fact, Make has a table of shell-builtin commands because it must
+ask the shell to execute them.
+
+Until recently, 'command' was missing in the table.
+
+This issue was fixed by the following commit:
+
+| commit 1af314465e5dfe3e8baa839a32a72e83c04f26ef
+| Author: Paul Smith <psmith@gnu.org>
+| Date: Sun Nov 12 18:10:28 2017 -0500
+|
+| * job.c: Add "command" as a known shell built-in.
+|
+| This is not a POSIX shell built-in but it's common in UNIX shells.
+| Reported by Nick Bowler <nbowler@draconx.ca>.
+
+Because the latest release is GNU Make 4.2.1 in 2016, this commit is
+not included in any released versions. (But some distributions may
+have back-ported it.)
+
+We need to trick Make to spawn a subshell. There are various ways to
+do so:
+
+ 1) Use a shell special character '~' as dummy
+
+ $(shell : ~; command -v $(c)gcc)
+
+ 2) Use a variable reference that always expands to the empty string
+ (suggested by David Laight)
+
+ $(shell command$${x:+} -v $(c)gcc)
+
+ 3) Use redirect
+
+ $(shell command -v $(c)gcc 2>/dev/null)
+
+I chose 3) to not confuse people. The stderr would not be polluted
+anyway, but it will provide extra safety, and is easy to understand.
+
+Tested on Make 3.81, 3.82, 4.0, 4.1, 4.2, 4.2.1
+
+[1] http://pubs.opengroup.org/onlinepubs/9699919799/utilities/command.html
+
+Fixes: bd55f96fa9fc ("kbuild: refactor cc-cross-prefix implementation")
+Cc: linux-stable <stable@vger.kernel.org> # 5.1
+Reported-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Tested-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/Kbuild.include | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -73,8 +73,13 @@ endef
+ # Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-)
+ # Return first <prefix> where a <prefix>gcc is found in PATH.
+ # If no gcc found in PATH with listed prefixes return nothing
++#
++# Note: '2>/dev/null' is here to force Make to invoke a shell. Otherwise, it
++# would try to directly execute the shell builtin 'command'. This workaround
++# should be kept for a long time since this issue was fixed only after the
++# GNU Make 4.2.1 release.
+ cc-cross-prefix = $(firstword $(foreach c, $(filter-out -%, $(1)), \
+- $(if $(shell which $(c)gcc), $(c))))
++ $(if $(shell command -v $(c)gcc 2>/dev/null), $(c))))
+
+ # output directory for tests below
+ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
--- /dev/null
+From 61009f82a93f7c0b33cd9b3b263a6ab48f8b49d4 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 10 May 2019 14:24:41 +0300
+Subject: memstick: mspro_block: Fix an error code in mspro_block_issue_req()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 61009f82a93f7c0b33cd9b3b263a6ab48f8b49d4 upstream.
+
+We accidentally changed the error code from -EAGAIN to 1 when we did the
+blk-mq conversion.
+
+Maybe a contributing factor to this mistake is that it wasn't obvious
+that the "while (chunk) {" condition is always true. I have cleaned
+that up as well.
+
+Fixes: d0be12274dad ("mspro_block: convert to blk-mq")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/memstick/core/mspro_block.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -694,13 +694,13 @@ static void h_mspro_block_setup_cmd(stru
+
+ /*** Data transfer ***/
+
+-static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
++static int mspro_block_issue_req(struct memstick_dev *card)
+ {
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ u64 t_off;
+ unsigned int count;
+
+- while (chunk) {
++ while (true) {
+ msb->current_page = 0;
+ msb->current_seg = 0;
+ msb->seg_count = blk_rq_map_sg(msb->block_req->q,
+@@ -709,6 +709,7 @@ static int mspro_block_issue_req(struct
+
+ if (!msb->seg_count) {
+ unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
++ bool chunk;
+
+ chunk = blk_update_request(msb->block_req,
+ BLK_STS_RESOURCE,
+@@ -718,7 +719,7 @@ static int mspro_block_issue_req(struct
+ __blk_mq_end_request(msb->block_req,
+ BLK_STS_RESOURCE);
+ msb->block_req = NULL;
+- break;
++ return -EAGAIN;
+ }
+
+ t_off = blk_rq_pos(msb->block_req);
+@@ -735,8 +736,6 @@ static int mspro_block_issue_req(struct
+ memstick_new_req(card->host);
+ return 0;
+ }
+-
+- return 1;
+ }
+
+ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+@@ -779,7 +778,7 @@ static int mspro_block_complete_req(stru
+ chunk = blk_update_request(msb->block_req,
+ errno_to_blk_status(error), t_len);
+ if (chunk) {
+- error = mspro_block_issue_req(card, chunk);
++ error = mspro_block_issue_req(card);
+ if (!error)
+ goto out;
+ } else {
+@@ -849,7 +848,7 @@ static blk_status_t mspro_queue_rq(struc
+ msb->block_req = bd->rq;
+ blk_mq_start_request(bd->rq);
+
+- if (mspro_block_issue_req(card, true))
++ if (mspro_block_issue_req(card))
+ msb->block_req = NULL;
+
+ spin_unlock_irq(&msb->q_lock);
--- /dev/null
+From 074a1e1167afd82c26f6d03a9a8b997d564bb241 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 28 May 2019 17:05:03 +0000
+Subject: MIPS: Bounds check virt_addr_valid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit 074a1e1167afd82c26f6d03a9a8b997d564bb241 upstream.
+
+The virt_addr_valid() function is meant to return true iff
+virt_to_page() will return a valid struct page reference. This is true
+iff the address provided is found within the unmapped address range
+between PAGE_OFFSET & MAP_BASE, but we don't currently check for that
+condition. Instead we simply mask the address to obtain what will be a
+physical address if the virtual address is indeed in the desired range,
+shift it to form a PFN & then call pfn_valid(). This can incorrectly
+return true if called with a virtual address which, after masking,
+happens to form a physical address corresponding to a valid PFN.
+
+For example we may vmalloc an address in the kernel mapped region
+starting at MAP_BASE & obtain the virtual address:
+
+ addr = 0xc000000000002000
+
+When masked by virt_to_phys(), which uses __pa() & in turn CPHYSADDR(),
+we obtain the following (bogus) physical address:
+
+ addr = 0x2000
+
+In a common system with PHYS_OFFSET=0 this will correspond to a valid
+struct page which should really be accessed by virtual address
+PAGE_OFFSET+0x2000, causing virt_addr_valid() to incorrectly return 1
+indicating that the original address corresponds to a struct page.
+
+This is equivalent to the ARM64 change made in commit ca219452c6b8
+("arm64: Correctly bounds check virt_addr_valid").
+
+This fixes fallout when hardened usercopy is enabled caused by the
+related commit 517e1fbeb65f ("mm/usercopy: Drop extra
+is_vmalloc_or_module() check") which removed a check for the vmalloc
+range that was present from the introduction of the hardened usercopy
+feature.
+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+References: ca219452c6b8 ("arm64: Correctly bounds check virt_addr_valid")
+References: 517e1fbeb65f ("mm/usercopy: Drop extra is_vmalloc_or_module() check")
+Reported-by: Julien Cristau <jcristau@debian.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Tested-by: YunQiang Su <ysu@wavecomp.com>
+URL: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=929366
+Cc: stable@vger.kernel.org # v4.12+
+Cc: linux-mips@vger.kernel.org
+Cc: Yunqiang Su <ysu@wavecomp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/mmap.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct
+
+ int __virt_addr_valid(const volatile void *kaddr)
+ {
++ unsigned long vaddr = (unsigned long)kaddr;
++
++ if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
++ return 0;
++
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+ }
+ EXPORT_SYMBOL_GPL(__virt_addr_valid);
--- /dev/null
+From e4f2d1af7163becb181419af9dece9206001e0a6 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@mips.com>
+Date: Tue, 28 May 2019 17:21:26 +0000
+Subject: MIPS: pistachio: Build uImage.gz by default
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paul Burton <paul.burton@mips.com>
+
+commit e4f2d1af7163becb181419af9dece9206001e0a6 upstream.
+
+The pistachio platform uses the U-Boot bootloader & generally boots a
+kernel in the uImage format. As such it's useful to build one when
+building the kernel, but to do so currently requires the user to
+manually specify a uImage target on the make command line.
+
+Make uImage.gz the pistachio platform's default build target, so that
+the default is to build a kernel image that we can actually boot on a
+board such as the MIPS Creator Ci40.
+
+Marked for stable backport as far as v4.1 where pistachio support was
+introduced. This is primarily useful for CI systems such as kernelci.org
+which will benefit from us building a suitable image which can then be
+booted as part of automated testing, extending our test coverage to the
+affected stable branches.
+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Reviewed-by: Kevin Hilman <khilman@baylibre.com>
+Tested-by: Kevin Hilman <khilman@baylibre.com>
+URL: https://groups.io/g/kernelci/message/388
+Cc: stable@vger.kernel.org # v4.1+
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/pistachio/Platform | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/pistachio/Platform
++++ b/arch/mips/pistachio/Platform
+@@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
+ -I$(srctree)/arch/mips/include/asm/mach-pistachio
+ load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
+ zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
++all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
--- /dev/null
+From 7397993145872c74871ab2aa7fa26a427144088a Mon Sep 17 00:00:00 2001
+From: Faiz Abbas <faiz_abbas@ti.com>
+Date: Tue, 28 May 2019 15:29:26 +0530
+Subject: mmc: sdhci_am654: Fix SLOTTYPE write
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+commit 7397993145872c74871ab2aa7fa26a427144088a upstream.
+
+In the call to regmap_update_bits() for SLOTTYPE, the mask and value
+fields are exchanged. Fix this.
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Fixes: 41fd4caeb00b ("mmc: sdhci_am654: Add Initial Support for AM654 SDHCI driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci_am654.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -209,7 +209,7 @@ static int sdhci_am654_init(struct sdhci
+ ctl_cfg_2 = SLOTTYPE_EMBEDDED;
+
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_2,
+- ctl_cfg_2, SLOTTYPE_MASK);
++ SLOTTYPE_MASK, ctl_cfg_2);
+
+ return sdhci_add_host(host);
+ }
--- /dev/null
+From 51b72656bb39fdcb8f3174f4007bcc83ad1d275f Mon Sep 17 00:00:00 2001
+From: Takeshi Saito <takeshi.saito.xv@renesas.com>
+Date: Wed, 15 May 2019 20:23:46 +0200
+Subject: mmc: tmio: fix SCC error handling to avoid false positive CRC error
+
+From: Takeshi Saito <takeshi.saito.xv@renesas.com>
+
+commit 51b72656bb39fdcb8f3174f4007bcc83ad1d275f upstream.
+
+If an SCC error occurs during a read/write command execution, a false
+positive CRC error message is output.
+
+mmcblk0: response CRC error sending r/w cmd command, card status 0x900
+
+check_scc_error() checks SCC_RVSREQ.RVSERR bit. RVSERR detects a
+correction error in the next (up or down) delay tap position. However,
+since the command is successful, only retuning needs to be executed.
+This has been confirmed by HW engineers.
+
+Thus, on SCC error, set retuning flag instead of setting an error code.
+
+Fixes: b85fb0a1c8ae ("mmc: tmio: Fix SCC error detection")
+Signed-off-by: Takeshi Saito <takeshi.saito.xv@renesas.com>
+[wsa: updated comment and commit message, removed some braces]
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/tmio_mmc_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -842,8 +842,9 @@ static void tmio_mmc_finish_request(stru
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ tmio_mmc_abort_dma(host);
+
++ /* SCC error means retune, but executed command was still successful */
+ if (host->check_scc_error && host->check_scc_error(host))
+- mrq->cmd->error = -EILSEQ;
++ mmc_retune_needed(host->mmc);
+
+ /* If SET_BLOCK_COUNT, continue with main command */
+ if (host->mrq && !mrq->cmd->error) {
--- /dev/null
+From 52b042ab9948cc367b61f9ca9c18603aa7813c3a Mon Sep 17 00:00:00 2001
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+Date: Wed, 22 May 2019 01:57:10 +0800
+Subject: NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter
+
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+
+commit 52b042ab9948cc367b61f9ca9c18603aa7813c3a upstream.
+
+Commit b7dbcc0e433f "NFSv4.1: Fix a race where CB_NOTIFY_LOCK fails to wake a waiter"
+found this bug. However it didn't fix it.
+
+This commit replaces schedule_timeout() with wait_woken() and
+default_wake_function() with woken_wake_function() in function
+nfs4_retry_setlk() and nfs4_wake_lock_waiter(). wait_woken() uses
+memory barriers in its implementation to avoid potential race condition
+when putting a process into sleeping state and then waking it up.
+
+Fixes: a1d617d8f134 ("nfs: allow blocking locks to be awoken by lock callbacks")
+Cc: stable@vger.kernel.org #4.9+
+Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 24 +++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6867,7 +6867,6 @@ struct nfs4_lock_waiter {
+ struct task_struct *task;
+ struct inode *inode;
+ struct nfs_lowner *owner;
+- bool notified;
+ };
+
+ static int
+@@ -6889,13 +6888,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t
+ /* Make sure it's for the right inode */
+ if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
+ return 0;
+-
+- waiter->notified = true;
+ }
+
+ /* override "private" so we can use default_wake_function */
+ wait->private = waiter->task;
+- ret = autoremove_wake_function(wait, mode, flags, key);
++ ret = woken_wake_function(wait, mode, flags, key);
++ if (ret)
++ list_del_init(&wait->entry);
+ wait->private = waiter;
+ return ret;
+ }
+@@ -6904,7 +6903,6 @@ static int
+ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+ int status = -ERESTARTSYS;
+- unsigned long flags;
+ struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs_client *clp = server->nfs_client;
+@@ -6914,8 +6912,7 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ .s_dev = server->s_dev };
+ struct nfs4_lock_waiter waiter = { .task = current,
+ .inode = state->inode,
+- .owner = &owner,
+- .notified = false };
++ .owner = &owner};
+ wait_queue_entry_t wait;
+
+ /* Don't bother with waitqueue if we don't expect a callback */
+@@ -6928,21 +6925,14 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ add_wait_queue(q, &wait);
+
+ while(!signalled()) {
+- waiter.notified = false;
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+ break;
+
+ status = -ERESTARTSYS;
+- spin_lock_irqsave(&q->lock, flags);
+- if (waiter.notified) {
+- spin_unlock_irqrestore(&q->lock, flags);
+- continue;
+- }
+- set_current_state(TASK_INTERRUPTIBLE);
+- spin_unlock_irqrestore(&q->lock, flags);
+-
+- freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
++ freezer_do_not_count();
++ wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
++ freezer_count();
+ }
+
+ finish_wait(q, &wait);
--- /dev/null
+From ba851a39c9703f09684a541885ed176f8fb7c868 Mon Sep 17 00:00:00 2001
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+Date: Mon, 13 May 2019 14:58:22 +0800
+Subject: NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled
+
+From: Yihao Wu <wuyihao@linux.alibaba.com>
+
+commit ba851a39c9703f09684a541885ed176f8fb7c868 upstream.
+
+When a waiter is waked by CB_NOTIFY_LOCK, it will retry
+nfs4_proc_setlk(). The waiter may fail to nfs4_proc_setlk() and sleep
+again. However, the waiter is already removed from clp->cl_lock_waitq
+when handling CB_NOTIFY_LOCK in nfs4_wake_lock_waiter(). So any
+subsequent CB_NOTIFY_LOCK won't wake this waiter anymore. We should
+put the waiter back to clp->cl_lock_waitq before retrying.
+
+Cc: stable@vger.kernel.org #4.9+
+Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6922,20 +6922,22 @@ nfs4_retry_setlk(struct nfs4_state *stat
+ init_wait(&wait);
+ wait.private = &waiter;
+ wait.func = nfs4_wake_lock_waiter;
+- add_wait_queue(q, &wait);
+
+ while(!signalled()) {
++ add_wait_queue(q, &wait);
+ status = nfs4_proc_setlk(state, cmd, request);
+- if ((status != -EAGAIN) || IS_SETLK(cmd))
++ if ((status != -EAGAIN) || IS_SETLK(cmd)) {
++ finish_wait(q, &wait);
+ break;
++ }
+
+ status = -ERESTARTSYS;
+ freezer_do_not_count();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
+ freezer_count();
++ finish_wait(q, &wait);
+ }
+
+- finish_wait(q, &wait);
+ return status;
+ }
+ #else /* !CONFIG_NFS_V4_1 */
--- /dev/null
+From 5651cd3c43368873d0787b52acb2e0e08f3c5da4 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagi@grimberg.me>
+Date: Tue, 28 May 2019 22:49:04 -0700
+Subject: nvme-rdma: fix queue mapping when queue count is limited
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+commit 5651cd3c43368873d0787b52acb2e0e08f3c5da4 upstream.
+
+When the controller supports less queues than requested, we
+should make sure that queue mapping does the right thing and
+not assume that all queues are available. This fixes a crash
+when the controller supports less queues than requested.
+
+The rules are:
+1. if no write/poll queues are requested, we assign the available queues
+ to the default queue map. The default and read queue maps share the
+ existing queues.
+2. if write queues are requested:
+ - first make sure that read queue map gets the requested
+ nr_io_queues count
+ - then grant the default queue map the minimum between the requested
+ nr_write_queues and the remaining queues. If there are no available
+ queues to dedicate to the default queue map, fallback to (1) and
+ share all the queues in the existing queue map.
+3. if poll queues are requested:
+ - map the remaining queues to the poll queue map.
+
+Also, provide a log indication on how we constructed the different
+queue maps.
+
+Reported-by: Harris, James R <james.r.harris@intel.com>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Tested-by: Jim Harris <james.r.harris@intel.com>
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/rdma.c | 99 ++++++++++++++++++++++++++++-------------------
+ 1 file changed, 61 insertions(+), 38 deletions(-)
+
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -641,34 +641,16 @@ static int nvme_rdma_alloc_io_queues(str
+ {
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct ib_device *ibdev = ctrl->device->dev;
+- unsigned int nr_io_queues;
++ unsigned int nr_io_queues, nr_default_queues;
++ unsigned int nr_read_queues, nr_poll_queues;
+ int i, ret;
+
+- nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+-
+- /*
+- * we map queues according to the device irq vectors for
+- * optimal locality so we don't need more queues than
+- * completion vectors.
+- */
+- nr_io_queues = min_t(unsigned int, nr_io_queues,
+- ibdev->num_comp_vectors);
+-
+- if (opts->nr_write_queues) {
+- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+- min(opts->nr_write_queues, nr_io_queues);
+- nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+- } else {
+- ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+- }
+-
+- ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+-
+- if (opts->nr_poll_queues) {
+- ctrl->io_queues[HCTX_TYPE_POLL] =
+- min(opts->nr_poll_queues, num_online_cpus());
+- nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+- }
++ nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
++ min(opts->nr_io_queues, num_online_cpus()));
++ nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
++ min(opts->nr_write_queues, num_online_cpus()));
++ nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
++ nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret)
+@@ -681,6 +663,34 @@ static int nvme_rdma_alloc_io_queues(str
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
++ if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
++ /*
++ * separate read/write queues
++ * hand out dedicated default queues only after we have
++ * sufficient read queues.
++ */
++ ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
++ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
++ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
++ min(nr_default_queues, nr_io_queues);
++ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
++ } else {
++ /*
++ * shared read/write queues
++ * either no write queues were requested, or we don't have
++ * sufficient queue count to have dedicated default queues.
++ */
++ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
++ min(nr_read_queues, nr_io_queues);
++ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
++ }
++
++ if (opts->nr_poll_queues && nr_io_queues) {
++ /* map dedicated poll queues only if we have queues left */
++ ctrl->io_queues[HCTX_TYPE_POLL] =
++ min(nr_poll_queues, nr_io_queues);
++ }
++
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ ret = nvme_rdma_alloc_queue(ctrl, i,
+ ctrl->ctrl.sqsize + 1);
+@@ -1787,17 +1797,24 @@ static void nvme_rdma_complete_rq(struct
+ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+ {
+ struct nvme_rdma_ctrl *ctrl = set->driver_data;
++ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+
+- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+- set->map[HCTX_TYPE_DEFAULT].nr_queues =
+- ctrl->io_queues[HCTX_TYPE_DEFAULT];
+- set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
+- if (ctrl->ctrl.opts->nr_write_queues) {
++ if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
+ /* separate read/write queues */
++ set->map[HCTX_TYPE_DEFAULT].nr_queues =
++ ctrl->io_queues[HCTX_TYPE_DEFAULT];
++ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
++ set->map[HCTX_TYPE_READ].nr_queues =
++ ctrl->io_queues[HCTX_TYPE_READ];
+ set->map[HCTX_TYPE_READ].queue_offset =
+- ctrl->io_queues[HCTX_TYPE_DEFAULT];
++ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+- /* mixed read/write queues */
++ /* shared read/write queues */
++ set->map[HCTX_TYPE_DEFAULT].nr_queues =
++ ctrl->io_queues[HCTX_TYPE_DEFAULT];
++ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
++ set->map[HCTX_TYPE_READ].nr_queues =
++ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_READ].queue_offset = 0;
+ }
+ blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+@@ -1805,16 +1822,22 @@ static int nvme_rdma_map_queues(struct b
+ blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
+ ctrl->device->dev, 0);
+
+- if (ctrl->ctrl.opts->nr_poll_queues) {
++ if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
++ /* map dedicated poll queues only if we have queues left */
+ set->map[HCTX_TYPE_POLL].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_POLL];
+ set->map[HCTX_TYPE_POLL].queue_offset =
+- ctrl->io_queues[HCTX_TYPE_DEFAULT];
+- if (ctrl->ctrl.opts->nr_write_queues)
+- set->map[HCTX_TYPE_POLL].queue_offset +=
+- ctrl->io_queues[HCTX_TYPE_READ];
++ ctrl->io_queues[HCTX_TYPE_DEFAULT] +
++ ctrl->io_queues[HCTX_TYPE_READ];
+ blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+ }
++
++ dev_info(ctrl->ctrl.device,
++ "mapped %d/%d/%d default/read/poll queues.\n",
++ ctrl->io_queues[HCTX_TYPE_DEFAULT],
++ ctrl->io_queues[HCTX_TYPE_READ],
++ ctrl->io_queues[HCTX_TYPE_POLL]);
++
+ return 0;
+ }
+
--- /dev/null
+From 527a1d1ede98479bf90c31a64822107ac7e6d276 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 27 May 2019 21:20:00 +0200
+Subject: parisc: Fix crash due alternative coding for NP iopdir_fdc bit
+
+From: Helge Deller <deller@gmx.de>
+
+commit 527a1d1ede98479bf90c31a64822107ac7e6d276 upstream.
+
+According to the found documentation, data cache flushes and sync
+instructions are needed on the PCX-U+ (PA8200, e.g. C200/C240)
+platforms, while PCX-W (PA8500, e.g. C360) platforms apparently don't
+need those flushes when changing the IO PDIR data structures.
+
+We have no documentation for PCX-W+ (PA8600) and PCX-W2 (PA8700) CPUs,
+but Carlo Pisani reported that his C3600 machine (PA8600, PCX-W+) fails
+when the fdc instructions were removed. His firmware didn't set the NIOP
+bit, so one may assume it's a firmware bug since other C3750 machines
+had the bit set.
+
+Even if documentation (as mentioned above) states that PCX-W (PA8500,
+e.g. J5000) does not need fdc flushes, Sven could show that an Adaptec
+29320A PCI-X SCSI controller reliably failed on a dd command during the
+first five minutes in his J5000 when fdc flushes were missing.
+
+Going forward, we will now NOT replace the fdc and sync assembler
+instructions by NOPS if:
+a) the NP iopdir_fdc bit was set by firmware, or
+b) we find a CPU up to and including a PCX-W+ (PA8600).
+
+This fixes the HPMC crashes on a C240 and C36XX machines. For other
+machines we rely on the firmware to set the bit when needed.
+
+In case one finds HPMC issues, people could try to boot their machines
+with the "no-alternatives" kernel option to turn off any alternative
+patching.
+
+Reported-by: Sven Schnelle <svens@stackframe.org>
+Reported-by: Carlo Pisani <carlojpisani@gmail.com>
+Tested-by: Sven Schnelle <svens@stackframe.org>
+Fixes: 3847dab77421 ("parisc: Add alternative coding infrastructure")
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: stable@vger.kernel.org # 5.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/alternative.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/kernel/alternative.c
++++ b/arch/parisc/kernel/alternative.c
+@@ -56,7 +56,8 @@ void __init_or_module apply_alternatives
+ * time IO-PDIR is changed in Ike/Astro.
+ */
+ if ((cond & ALT_COND_NO_IOC_FDC) &&
+- (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
++ ((boot_cpu_data.cpu_type <= pcxw_) ||
++ (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
+ continue;
+
+ /* Want to replace pdtlb by a pdtlb,l instruction? */
--- /dev/null
+From 63923d2c3800919774f5c651d503d1dd2adaddd5 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Mon, 27 May 2019 20:15:14 -0400
+Subject: parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 63923d2c3800919774f5c651d503d1dd2adaddd5 upstream.
+
+We only support I/O to kernel space. Using %sr1 to load the coherence
+index may be racy unless interrupts are disabled. This patch changes the
+code used to load the coherence index to use implicit space register
+selection. This saves one instruction and eliminates the race.
+
+Tested on rp3440, c8000 and c3750.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/parisc/ccio-dma.c | 4 +---
+ drivers/parisc/sba_iommu.c | 3 +--
+ 2 files changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_
+ /* We currently only support kernel addresses */
+ BUG_ON(sid != KERNEL_SPACE);
+
+- mtsp(sid,1);
+-
+ /*
+ ** WORD 1 - low order word
+ ** "hints" parm includes the VALID bit!
+@@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_
+ ** Grab virtual index [0:11]
+ ** Deposit virt_idx bits into I/O PDIR word
+ */
+- asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
++ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
+ asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
+
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t
+ pa = virt_to_phys(vba);
+ pa &= IOVP_MASK;
+
+- mtsp(sid,1);
+- asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
++ asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
+ pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
+
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
--- /dev/null
+From 8880fa32c557600f5f624084152668ed3c2ea51e Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 30 May 2019 23:37:29 -0700
+Subject: pstore/ram: Run without kernel crash dump region
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 8880fa32c557600f5f624084152668ed3c2ea51e upstream.
+
+The ram pstore backend has always had the crash dumper frontend enabled
+unconditionally. However, it was possible to effectively disable it
+by setting a record_size=0. All the machinery would run (storing dumps
+to the temporary crash buffer), but 0 bytes would ultimately get stored
+due to there being no przs allocated for dumps. Commit 89d328f637b9
+("pstore/ram: Correctly calculate usable PRZ bytes"), however, assumed
+that there would always be at least one allocated dprz for calculating
+the size of the temporary crash buffer. This was, of course, not the
+case when record_size=0, and would lead to a NULL deref trying to find
+the dprz buffer size:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+...
+IP: ramoops_probe+0x285/0x37e (fs/pstore/ram.c:808)
+
+ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+
+Instead, we need to only enable the frontends based on the success of the
+prz initialization and only take the needed actions when those zones are
+available. (This also fixes a possible error in detecting if the ftrace
+frontend should be enabled.)
+
+Reported-and-tested-by: Yaro Slav <yaro330@gmail.com>
+Fixes: 89d328f637b9 ("pstore/ram: Correctly calculate usable PRZ bytes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c | 3 ++-
+ fs/pstore/ram.c | 36 +++++++++++++++++++++++-------------
+ 2 files changed, 25 insertions(+), 14 deletions(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -608,7 +608,8 @@ int pstore_register(struct pstore_info *
+ return -EINVAL;
+ }
+
+- allocate_buf_for_compression();
++ if (psi->flags & PSTORE_FLAGS_DMESG)
++ allocate_buf_for_compression();
+
+ if (pstore_is_mounted())
+ pstore_get_records(0);
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -800,26 +800,36 @@ static int ramoops_probe(struct platform
+
+ cxt->pstore.data = cxt;
+ /*
+- * Since bufsize is only used for dmesg crash dumps, it
+- * must match the size of the dprz record (after PRZ header
+- * and ECC bytes have been accounted for).
++ * Prepare frontend flags based on which areas are initialized.
++ * For ramoops_init_przs() cases, the "max count" variable tells
++ * if there are regions present. For ramoops_init_prz() cases,
++ * the single region size is how to check.
+ */
+- cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+- cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+- if (!cxt->pstore.buf) {
+- pr_err("cannot allocate pstore crash dump buffer\n");
+- err = -ENOMEM;
+- goto fail_clear;
+- }
+-
+- cxt->pstore.flags = PSTORE_FLAGS_DMESG;
++ cxt->pstore.flags = 0;
++ if (cxt->max_dump_cnt)
++ cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
+ if (cxt->console_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
+- if (cxt->ftrace_size)
++ if (cxt->max_ftrace_cnt)
+ cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
+ if (cxt->pmsg_size)
+ cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
+
++ /*
++ * Since bufsize is only used for dmesg crash dumps, it
++ * must match the size of the dprz record (after PRZ header
++ * and ECC bytes have been accounted for).
++ */
++ if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
++ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
++ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
++ if (!cxt->pstore.buf) {
++ pr_err("cannot allocate pstore crash dump buffer\n");
++ err = -ENOMEM;
++ goto fail_clear;
++ }
++ }
++
+ err = pstore_register(&cxt->pstore);
+ if (err) {
+ pr_err("registering with pstore failed\n");
--- /dev/null
+From a9fb94a99bb515d8720ba8440ce3aba84aec80f8 Mon Sep 17 00:00:00 2001
+From: Pi-Hsun Shih <pihsun@chromium.org>
+Date: Mon, 20 May 2019 14:51:19 +0800
+Subject: pstore: Set tfm to NULL on free_buf_for_compression
+
+From: Pi-Hsun Shih <pihsun@chromium.org>
+
+commit a9fb94a99bb515d8720ba8440ce3aba84aec80f8 upstream.
+
+Set tfm to NULL on free_buf_for_compression() after crypto_free_comp().
+
+This avoid a use-after-free when allocate_buf_for_compression()
+and free_buf_for_compression() are called twice. Although
+free_buf_for_compression() freed the tfm, allocate_buf_for_compression()
+won't reinitialize the tfm since the tfm pointer is not NULL.
+
+Fixes: 95047b0519c1 ("pstore: Refactor compression initialization")
+Signed-off-by: Pi-Hsun Shih <pihsun@chromium.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -347,8 +347,10 @@ static void allocate_buf_for_compression
+
+ static void free_buf_for_compression(void)
+ {
+- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
++ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
+ crypto_free_comp(tfm);
++ tfm = NULL;
++ }
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ big_oops_buf_sz = 0;
--- /dev/null
+From 66be4e66a7f422128748e3c3ef6ee72b20a6197b Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 3 Jun 2019 13:26:20 -0700
+Subject: rcu: locking and unlocking need to always be at least barriers
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 66be4e66a7f422128748e3c3ef6ee72b20a6197b upstream.
+
+Herbert Xu pointed out that commit bb73c52bad36 ("rcu: Don't disable
+preemption for Tiny and Tree RCU readers") was incorrect in making the
+preempt_disable/enable() be conditional on CONFIG_PREEMPT_COUNT.
+
+If CONFIG_PREEMPT_COUNT isn't enabled, the preemption enable/disable is
+a no-op, but still is a compiler barrier.
+
+And RCU locking still _needs_ that compiler barrier.
+
+It is simply fundamentally not true that RCU locking would be a complete
+no-op: we still need to guarantee (for example) that things that can
+trap and cause preemption cannot migrate into the RCU locked region.
+
+The way we do that is by making it a barrier.
+
+See for example commit 386afc91144b ("spinlocks and preemption points
+need to be at least compiler barriers") from back in 2013 that had
+similar issues with spinlocks that become no-ops on UP: they must still
+constrain the compiler from moving other operations into the critical
+region.
+
+Now, it is true that a lot of RCU operations already use READ_ONCE() and
+WRITE_ONCE() (which in practice likely would never be re-ordered wrt
+anything remotely interesting), but it is also true that that is not
+globally the case, and that it's not even necessarily always possible
+(ie bitfields etc).
+
+Reported-by: Herbert Xu <herbert@gondor.apana.org.au>
+Fixes: bb73c52bad36 ("rcu: Don't disable preemption for Tiny and Tree RCU readers")
+Cc: stable@kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/rcupdate.h | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -56,14 +56,12 @@ void __rcu_read_unlock(void);
+
+ static inline void __rcu_read_lock(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+- preempt_disable();
++ preempt_disable();
+ }
+
+ static inline void __rcu_read_unlock(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+- preempt_enable();
++ preempt_enable();
+ }
+
+ static inline int rcu_preempt_depth(void)
--- /dev/null
+From 962f0af83c239c0aef05639631e871c874b00f99 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Mon, 27 May 2019 18:40:19 +0200
+Subject: s390/mm: fix address space detection in exception handling
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 962f0af83c239c0aef05639631e871c874b00f99 upstream.
+
+Commit 0aaba41b58bc ("s390: remove all code using the access register
+mode") removed access register mode from the kernel, and also from the
+address space detection logic. However, user space could still switch
+to access register mode (trans_exc_code == 1), and exceptions in that
+mode would not be correctly assigned.
+
+Fix this by adding a check for trans_exc_code == 1 to get_fault_type(),
+and remove the wrong comment line before that function.
+
+Fixes: 0aaba41b58bc ("s390: remove all code using the access register mode")
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: <stable@vger.kernel.org> # v4.15+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/mm/fault.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -83,7 +83,6 @@ static inline int notify_page_fault(stru
+
+ /*
+ * Find out which address space caused the exception.
+- * Access register mode is impossible, ignore space == 3.
+ */
+ static inline enum fault_type get_fault_type(struct pt_regs *regs)
+ {
+@@ -108,6 +107,10 @@ static inline enum fault_type get_fault_
+ }
+ return VDSO_FAULT;
+ }
++ if (trans_exc_code == 1) {
++ /* access register mode, not used in the kernel */
++ return USER_FAULT;
++ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
+ }
ipmr_base-do-not-reset-index-in-mr_table_dump.patch
net-mlx4_en-ethtool-remove-unsupported-sfp-eeprom-high-pages-query.patch
net-tls-replace-the-sleeping-lock-around-rx-resync-with-a-bit-lock.patch
+rcu-locking-and-unlocking-need-to-always-be-at-least-barriers.patch
+habanalabs-fix-debugfs-code.patch
+arc-mm-sigsegv-userspace-trying-to-access-kernel-virtual-memory.patch
+parisc-use-implicit-space-register-selection-for-loading-the-coherence-index-of-i-o-pdirs.patch
+parisc-fix-crash-due-alternative-coding-for-np-iopdir_fdc-bit.patch
+sunrpc-fix-regression-in-umount-of-a-secure-mount.patch
+sunrpc-fix-a-use-after-free-when-a-server-rejects-the-rpcsec_gss-credential.patch
+nfsv4.1-again-fix-a-race-where-cb_notify_lock-fails-to-wake-a-waiter.patch
+nfsv4.1-fix-bug-only-first-cb_notify_lock-is-handled.patch
+fuse-fallocate-fix-return-with-locked-inode.patch
+fuse-fix-copy_file_range-in-the-writeback-case.patch
+pstore-set-tfm-to-null-on-free_buf_for_compression.patch
+pstore-ram-run-without-kernel-crash-dump-region.patch
+kbuild-use-more-portable-command-v-for-cc-cross-prefix.patch
+memstick-mspro_block-fix-an-error-code-in-mspro_block_issue_req.patch
+mmc-tmio-fix-scc-error-handling-to-avoid-false-positive-crc-error.patch
+mmc-sdhci_am654-fix-slottype-write.patch
+x86-power-fix-nosmt-vs-hibernation-triple-fault-during-resume.patch
+x86-insn-eval-fix-use-after-free-access-to-ldt-entry.patch
+i2c-xiic-add-max_read_len-quirk.patch
+s390-mm-fix-address-space-detection-in-exception-handling.patch
+nvme-rdma-fix-queue-mapping-when-queue-count-is-limited.patch
+xen-blkfront-switch-kcalloc-to-kvcalloc-for-large-array-allocation.patch
+mips-bounds-check-virt_addr_valid.patch
+mips-pistachio-build-uimage.gz-by-default.patch
+genwqe-prevent-an-integer-overflow-in-the-ioctl.patch
+test_firmware-use-correct-snprintf-limit.patch
+drm-rockchip-fix-fb-references-in-async-update.patch
+drm-vc4-fix-fb-references-in-async-update.patch
+drm-gma500-cdv-check-vbt-config-bits-when-detecting-lvds-panels.patch
+drm-msm-fix-fb-references-in-async-update.patch
--- /dev/null
+From 7987b694ade8cc465ce10fb3dceaa614f13ceaf3 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 29 May 2019 12:49:52 -0400
+Subject: SUNRPC: Fix a use after free when a server rejects the RPCSEC_GSS credential
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 7987b694ade8cc465ce10fb3dceaa614f13ceaf3 upstream.
+
+The addition of rpc_check_timeout() to call_decode causes an Oops
+when the RPCSEC_GSS credential is rejected.
+The reason is that rpc_decode_header() will call xprt_release() in
+order to free task->tk_rqstp, which is needed by rpc_check_timeout()
+to check whether or not we should exit due to a soft timeout.
+
+The fix is to move the call to xprt_release() into call_decode() so
+we can perform it after rpc_check_timeout().
+
+Reported-by: Olga Kornievskaia <olga.kornievskaia@gmail.com>
+Reported-by: Nick Bowler <nbowler@draconx.ca>
+Fixes: cea57789e408 ("SUNRPC: Clean up")
+Cc: stable@vger.kernel.org # v5.1+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2387,17 +2387,21 @@ call_decode(struct rpc_task *task)
+ return;
+ case -EAGAIN:
+ task->tk_status = 0;
+- /* Note: rpc_decode_header() may have freed the RPC slot */
+- if (task->tk_rqstp == req) {
+- xdr_free_bvec(&req->rq_rcv_buf);
+- req->rq_reply_bytes_recvd = 0;
+- req->rq_rcv_buf.len = 0;
+- if (task->tk_client->cl_discrtry)
+- xprt_conditional_disconnect(req->rq_xprt,
+- req->rq_connect_cookie);
+- }
++ xdr_free_bvec(&req->rq_rcv_buf);
++ req->rq_reply_bytes_recvd = 0;
++ req->rq_rcv_buf.len = 0;
++ if (task->tk_client->cl_discrtry)
++ xprt_conditional_disconnect(req->rq_xprt,
++ req->rq_connect_cookie);
+ task->tk_action = call_encode;
+ rpc_check_timeout(task);
++ break;
++ case -EKEYREJECTED:
++ task->tk_action = call_reserve;
++ rpc_check_timeout(task);
++ rpcauth_invalcred(task);
++ /* Ensure we obtain a new XID if we retry! */
++ xprt_release(task);
+ }
+ }
+
+@@ -2533,11 +2537,7 @@ out_msg_denied:
+ break;
+ task->tk_cred_retry--;
+ trace_rpc__stale_creds(task);
+- rpcauth_invalcred(task);
+- /* Ensure we obtain a new XID! */
+- xprt_release(task);
+- task->tk_action = call_reserve;
+- return -EAGAIN;
++ return -EKEYREJECTED;
+ case rpc_autherr_badcred:
+ case rpc_autherr_badverf:
+ /* possibly garbled cred/verf? */
--- /dev/null
+From ec6017d9035986a36de064f48a63245930bfad6f Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <kolga@netapp.com>
+Date: Wed, 29 May 2019 10:46:00 -0400
+Subject: SUNRPC fix regression in umount of a secure mount
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+commit ec6017d9035986a36de064f48a63245930bfad6f upstream.
+
+If call_status returns ENOTCONN, we need to re-establish the connection
+state after. Otherwise the client goes into an infinite loop of call_encode,
+call_transmit, call_status (ENOTCONN), call_encode.
+
+Fixes: c8485e4d63 ("SUNRPC: Handle ECONNREFUSED correctly in xprt_transmit()")
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Cc: stable@vger.kernel.org # v2.6.29+
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2260,13 +2260,13 @@ call_status(struct rpc_task *task)
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ECONNABORTED:
++ case -ENOTCONN:
+ rpc_force_rebind(clnt);
+ /* fall through */
+ case -EADDRINUSE:
+ rpc_delay(task, 3*HZ);
+ /* fall through */
+ case -EPIPE:
+- case -ENOTCONN:
+ case -EAGAIN:
+ break;
+ case -EIO:
--- /dev/null
+From bd17cc5a20ae9aaa3ed775f360b75ff93cd66a1d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 15 May 2019 12:33:22 +0300
+Subject: test_firmware: Use correct snprintf() limit
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit bd17cc5a20ae9aaa3ed775f360b75ff93cd66a1d upstream.
+
+The limit here is supposed to be how much of the page is left, but it's
+just using PAGE_SIZE as the limit.
+
+The other thing to remember is that snprintf() returns the number of
+bytes which would have been copied if we had had enough room. So that
+means that if we run out of space then this code would end up passing a
+negative value as the limit and the kernel would print an error message.
+I have changed the code to use scnprintf() which returns the number of
+bytes that were successfully printed (not counting the NUL terminator).
+
+Fixes: c92316bf8e94 ("test_firmware: add batched firmware tests")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_firmware.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -223,30 +223,30 @@ static ssize_t config_show(struct device
+
+ mutex_lock(&test_fw_mutex);
+
+- len += snprintf(buf, PAGE_SIZE,
++ len += scnprintf(buf, PAGE_SIZE - len,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ if (test_fw_config->name)
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "name:\t%s\n",
+ test_fw_config->name);
+ else
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "name:\tEMTPY\n");
+
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "num_requests:\t%u\n", test_fw_config->num_requests);
+
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "send_uevent:\t\t%s\n",
+ test_fw_config->send_uevent ?
+ "FW_ACTION_HOTPLUG" :
+ "FW_ACTION_NOHOTPLUG");
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "sync_direct:\t\t%s\n",
+ test_fw_config->sync_direct ? "true" : "false");
+- len += snprintf(buf+len, PAGE_SIZE,
++ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+
+ mutex_unlock(&test_fw_mutex);
--- /dev/null
+From de9f869616dd95e95c00bdd6b0fcd3421e8a4323 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Sun, 2 Jun 2019 03:15:58 +0200
+Subject: x86/insn-eval: Fix use-after-free access to LDT entry
+
+From: Jann Horn <jannh@google.com>
+
+commit de9f869616dd95e95c00bdd6b0fcd3421e8a4323 upstream.
+
+get_desc() computes a pointer into the LDT while holding a lock that
+protects the LDT from being freed, but then drops the lock and returns the
+(now potentially dangling) pointer to its caller.
+
+Fix it by giving the caller a copy of the LDT entry instead.
+
+Fixes: 670f928ba09b ("x86/insn-eval: Add utility function to get segment descriptor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/insn-eval.c | 47 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 24 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/lib/insn-eval.c
++++ b/arch/x86/lib/insn-eval.c
+@@ -557,7 +557,8 @@ static int get_reg_offset_16(struct insn
+ }
+
+ /**
+- * get_desc() - Obtain pointer to a segment descriptor
++ * get_desc() - Obtain contents of a segment descriptor
++ * @out: Segment descriptor contents on success
+ * @sel: Segment selector
+ *
+ * Given a segment selector, obtain a pointer to the segment descriptor.
+@@ -565,18 +566,18 @@ static int get_reg_offset_16(struct insn
+ *
+ * Returns:
+ *
+- * Pointer to segment descriptor on success.
++ * True on success, false on failure.
+ *
+ * NULL on error.
+ */
+-static struct desc_struct *get_desc(unsigned short sel)
++static bool get_desc(struct desc_struct *out, unsigned short sel)
+ {
+ struct desc_ptr gdt_desc = {0, 0};
+ unsigned long desc_base;
+
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+- struct desc_struct *desc = NULL;
++ bool success = false;
+ struct ldt_struct *ldt;
+
+ /* Bits [15:3] contain the index of the desired entry. */
+@@ -584,12 +585,14 @@ static struct desc_struct *get_desc(unsi
+
+ mutex_lock(¤t->active_mm->context.lock);
+ ldt = current->active_mm->context.ldt;
+- if (ldt && sel < ldt->nr_entries)
+- desc = &ldt->entries[sel];
++ if (ldt && sel < ldt->nr_entries) {
++ *out = ldt->entries[sel];
++ success = true;
++ }
+
+ mutex_unlock(¤t->active_mm->context.lock);
+
+- return desc;
++ return success;
+ }
+ #endif
+ native_store_gdt(&gdt_desc);
+@@ -604,9 +607,10 @@ static struct desc_struct *get_desc(unsi
+ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
+
+ if (desc_base > gdt_desc.size)
+- return NULL;
++ return false;
+
+- return (struct desc_struct *)(gdt_desc.address + desc_base);
++ *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
++ return true;
+ }
+
+ /**
+@@ -628,7 +632,7 @@ static struct desc_struct *get_desc(unsi
+ */
+ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ short sel;
+
+ sel = get_segment_selector(regs, seg_reg_idx);
+@@ -666,11 +670,10 @@ unsigned long insn_get_seg_base(struct p
+ if (!sel)
+ return -1L;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return -1L;
+
+- return get_desc_base(desc);
++ return get_desc_base(&desc);
+ }
+
+ /**
+@@ -692,7 +695,7 @@ unsigned long insn_get_seg_base(struct p
+ */
+ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ unsigned long limit;
+ short sel;
+
+@@ -706,8 +709,7 @@ static unsigned long get_seg_limit(struc
+ if (!sel)
+ return 0;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return 0;
+
+ /*
+@@ -716,8 +718,8 @@ static unsigned long get_seg_limit(struc
+ * not tested when checking the segment limits. In practice,
+ * this means that the segment ends in (limit << 12) + 0xfff.
+ */
+- limit = get_desc_limit(desc);
+- if (desc->g)
++ limit = get_desc_limit(&desc);
++ if (desc.g)
+ limit = (limit << 12) + 0xfff;
+
+ return limit;
+@@ -741,7 +743,7 @@ static unsigned long get_seg_limit(struc
+ */
+ int insn_get_code_seg_params(struct pt_regs *regs)
+ {
+- struct desc_struct *desc;
++ struct desc_struct desc;
+ short sel;
+
+ if (v8086_mode(regs))
+@@ -752,8 +754,7 @@ int insn_get_code_seg_params(struct pt_r
+ if (sel < 0)
+ return sel;
+
+- desc = get_desc(sel);
+- if (!desc)
++ if (!get_desc(&desc, sel))
+ return -EINVAL;
+
+ /*
+@@ -761,10 +762,10 @@ int insn_get_code_seg_params(struct pt_r
+ * determines whether a segment contains data or code. If this is a data
+ * segment, return error.
+ */
+- if (!(desc->type & BIT(3)))
++ if (!(desc.type & BIT(3)))
+ return -EINVAL;
+
+- switch ((desc->l << 1) | desc->d) {
++ switch ((desc.l << 1) | desc.d) {
+ case 0: /*
+ * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
+ * both 16-bit.
--- /dev/null
+From ec527c318036a65a083ef68d8ba95789d2212246 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 30 May 2019 00:09:39 +0200
+Subject: x86/power: Fix 'nosmt' vs hibernation triple fault during resume
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit ec527c318036a65a083ef68d8ba95789d2212246 upstream.
+
+As explained in
+
+ 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+
+we always, no matter what, have to bring up x86 HT siblings during boot at
+least once in order to avoid first MCE bringing the system to its knees.
+
+That means that whenever 'nosmt' is supplied on the kernel command-line,
+all the HT siblings are as a result sitting in mwait or cpuidle after
+going through the online-offline cycle at least once.
+
+This causes a serious issue though when a kernel, which saw 'nosmt' on its
+commandline, is going to perform resume from hibernation: if the resume
+from the hibernated image is successful, cr3 is flipped in order to point
+to the address space of the kernel that is being resumed, which in turn
+means that all the HT siblings are all of a sudden mwaiting on address
+which is no longer valid.
+
+That results in triple fault shortly after cr3 is switched, and machine
+reboots.
+
+Fix this by always waking up all the SMT siblings before initiating the
+'restore from hibernation' process; this guarantees that all the HT
+siblings will be properly carried over to the resumed kernel waiting in
+resume_play_dead(), and acted upon accordingly afterwards, based on the
+target kernel configuration.
+
+Symmetrically, the resumed kernel has to push the SMT siblings to mwait
+again in case it has SMT disabled; this means it has to online all
+the siblings when resuming (so that they come out of hlt) and offline
+them again to let them reach mwait.
+
+Cc: 4.19+ <stable@vger.kernel.org> # v4.19+
+Debugged-by: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/power/cpu.c | 10 ++++++++++
+ arch/x86/power/hibernate.c | 33 +++++++++++++++++++++++++++++++++
+ include/linux/cpu.h | 4 ++++
+ kernel/cpu.c | 4 ++--
+ kernel/power/hibernate.c | 9 +++++++++
+ 5 files changed, 58 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable
+ * address in its instruction pointer may not be possible to resolve
+ * any more at that point (the page tables used by it previously may
+ * have been overwritten by hibernate image data).
++ *
++ * First, make sure that we wake up all the potentially disabled SMT
++ * threads which have been initially brought up and then put into
++ * mwait/cpuidle sleep.
++ * Those will be put to proper (not interfering with hibernation
++ * resume) sleep afterwards, and the resumed kernel will decide itself
++ * what to do with them.
+ */
++ ret = cpuhp_smt_enable();
++ if (ret)
++ return ret;
+ smp_ops.play_dead = resume_play_dead;
+ ret = disable_nonboot_cpus();
+ smp_ops.play_dead = play_dead;
+--- a/arch/x86/power/hibernate.c
++++ b/arch/x86/power/hibernate.c
+@@ -11,6 +11,7 @@
+ #include <linux/suspend.h>
+ #include <linux/scatterlist.h>
+ #include <linux/kdebug.h>
++#include <linux/cpu.h>
+
+ #include <crypto/hash.h>
+
+@@ -246,3 +247,35 @@ out:
+ __flush_tlb_all();
+ return 0;
+ }
++
++int arch_resume_nosmt(void)
++{
++ int ret = 0;
++ /*
++ * We reached this while coming out of hibernation. This means
++ * that SMT siblings are sleeping in hlt, as mwait is not safe
++ * against control transition during resume (see comment in
++ * hibernate_resume_nonboot_cpu_disable()).
++ *
++ * If the resumed kernel has SMT disabled, we have to take all the
++ * SMT siblings out of hlt, and offline them again so that they
++ * end up in mwait proper.
++ *
++ * Called with hotplug disabled.
++ */
++ cpu_hotplug_enable();
++ if (cpu_smt_control == CPU_SMT_DISABLED ||
++ cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
++ enum cpuhp_smt_control old = cpu_smt_control;
++
++ ret = cpuhp_smt_enable();
++ if (ret)
++ goto out;
++ ret = cpuhp_smt_disable(old);
++ if (ret)
++ goto out;
++ }
++out:
++ cpu_hotplug_disable();
++ return ret;
++}
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -183,10 +183,14 @@ enum cpuhp_smt_control {
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
+ extern void cpu_smt_check_topology(void);
++extern int cpuhp_smt_enable(void);
++extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
+ static inline void cpu_smt_check_topology(void) { }
++static inline int cpuhp_smt_enable(void) { return 0; }
++static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+ #endif
+
+ /*
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2064,7 +2064,7 @@ static void cpuhp_online_cpu_device(unsi
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ }
+
+-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
++int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+
+@@ -2096,7 +2096,7 @@ static int cpuhp_smt_disable(enum cpuhp_
+ return ret;
+ }
+
+-static int cpuhp_smt_enable(void)
++int cpuhp_smt_enable(void)
+ {
+ int cpu, ret = 0;
+
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -258,6 +258,11 @@ void swsusp_show_speed(ktime_t start, kt
+ (kps % 1000) / 10);
+ }
+
++__weak int arch_resume_nosmt(void)
++{
++ return 0;
++}
++
+ /**
+ * create_image - Create a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+@@ -325,6 +330,10 @@ static int create_image(int platform_mod
+ Enable_cpus:
+ enable_nonboot_cpus();
+
++ /* Allow architectures to do nosmt-specific post-resume dances */
++ if (!in_suspend)
++ error = arch_resume_nosmt();
++
+ Platform_finish:
+ platform_finish(platform_mode);
+
--- /dev/null
+From 1d5c76e66433382a1e170d1d5845bb0fed7467aa Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Fri, 3 May 2019 17:04:01 +0200
+Subject: xen-blkfront: switch kcalloc to kvcalloc for large array allocation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 1d5c76e66433382a1e170d1d5845bb0fed7467aa upstream.
+
+There's no reason to request physically contiguous memory for those
+allocations.
+
+[boris: added CC to stable]
+
+Cc: stable@vger.kernel.org
+Reported-by: Ian Jackson <ian.jackson@citrix.com>
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/xen-blkfront.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfr
+ }
+
+ free_shadow:
+- kfree(rinfo->shadow[i].grants_used);
++ kvfree(rinfo->shadow[i].grants_used);
+ rinfo->shadow[i].grants_used = NULL;
+- kfree(rinfo->shadow[i].indirect_grants);
++ kvfree(rinfo->shadow[i].indirect_grants);
+ rinfo->shadow[i].indirect_grants = NULL;
+- kfree(rinfo->shadow[i].sg);
++ kvfree(rinfo->shadow[i].sg);
+ rinfo->shadow[i].sg = NULL;
+ }
+
+@@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_i
+ for (i = 0; i < info->nr_rings; i++)
+ blkif_free_ring(&info->rinfo[i]);
+
+- kfree(info->rinfo);
++ kvfree(info->rinfo);
+ info->rinfo = NULL;
+ info->nr_rings = 0;
+ }
+@@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_
+ if (!info->nr_rings)
+ info->nr_rings = 1;
+
+- info->rinfo = kcalloc(info->nr_rings,
+- sizeof(struct blkfront_ring_info),
+- GFP_KERNEL);
++ info->rinfo = kvcalloc(info->nr_rings,
++ sizeof(struct blkfront_ring_info),
++ GFP_KERNEL);
+ if (!info->rinfo) {
+ xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
+@@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struc
+
+ for (i = 0; i < BLK_RING_SIZE(info); i++) {
+ rinfo->shadow[i].grants_used =
+- kcalloc(grants,
+- sizeof(rinfo->shadow[i].grants_used[0]),
+- GFP_NOIO);
+- rinfo->shadow[i].sg = kcalloc(psegs,
+- sizeof(rinfo->shadow[i].sg[0]),
+- GFP_NOIO);
++ kvcalloc(grants,
++ sizeof(rinfo->shadow[i].grants_used[0]),
++ GFP_NOIO);
++ rinfo->shadow[i].sg = kvcalloc(psegs,
++ sizeof(rinfo->shadow[i].sg[0]),
++ GFP_NOIO);
+ if (info->max_indirect_segments)
+ rinfo->shadow[i].indirect_grants =
+- kcalloc(INDIRECT_GREFS(grants),
+- sizeof(rinfo->shadow[i].indirect_grants[0]),
+- GFP_NOIO);
++ kvcalloc(INDIRECT_GREFS(grants),
++ sizeof(rinfo->shadow[i].indirect_grants[0]),
++ GFP_NOIO);
+ if ((rinfo->shadow[i].grants_used == NULL) ||
+ (rinfo->shadow[i].sg == NULL) ||
+ (info->max_indirect_segments &&
+@@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struc
+
+ out_of_memory:
+ for (i = 0; i < BLK_RING_SIZE(info); i++) {
+- kfree(rinfo->shadow[i].grants_used);
++ kvfree(rinfo->shadow[i].grants_used);
+ rinfo->shadow[i].grants_used = NULL;
+- kfree(rinfo->shadow[i].sg);
++ kvfree(rinfo->shadow[i].sg);
+ rinfo->shadow[i].sg = NULL;
+- kfree(rinfo->shadow[i].indirect_grants);
++ kvfree(rinfo->shadow[i].indirect_grants);
+ rinfo->shadow[i].indirect_grants = NULL;
+ }
+ if (!list_empty(&rinfo->indirect_pages)) {