4.16-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Apr 2018 10:05:02 +0000 (12:05 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 22 Apr 2018 10:05:02 +0000 (12:05 +0200)
added patches:
autofs-mount-point-create-should-honour-passed-in-mode.patch
device-dax-allow-map_sync-to-succeed.patch
don-t-leak-mnt_internal-away-from-internal-mounts.patch
drm-i915-correctly-handle-limited-range-ycbcr-data-on-vlv-chv.patch
drm-i915-fix-hibernation-with-acpi-s0-target-state.patch
drm-i915-gvt-init-mmio-by-lri-command-in-vgpu-inhibit-context.patch
hypfs_kill_super-deal-with-failed-allocations.patch
jffs2_kill_sb-deal-with-failed-allocations.patch
libnvdimm-dimm-handle-eacces-failures-from-label-reads.patch
mm-filemap.c-fix-null-pointer-in-page_cache_tree_insert.patch
orangefs_kill_sb-deal-with-allocation-failures.patch
rpc_pipefs-fix-double-dput.patch

13 files changed:
queue-4.16/autofs-mount-point-create-should-honour-passed-in-mode.patch [new file with mode: 0644]
queue-4.16/device-dax-allow-map_sync-to-succeed.patch [new file with mode: 0644]
queue-4.16/don-t-leak-mnt_internal-away-from-internal-mounts.patch [new file with mode: 0644]
queue-4.16/drm-i915-correctly-handle-limited-range-ycbcr-data-on-vlv-chv.patch [new file with mode: 0644]
queue-4.16/drm-i915-fix-hibernation-with-acpi-s0-target-state.patch [new file with mode: 0644]
queue-4.16/drm-i915-gvt-init-mmio-by-lri-command-in-vgpu-inhibit-context.patch [new file with mode: 0644]
queue-4.16/hypfs_kill_super-deal-with-failed-allocations.patch [new file with mode: 0644]
queue-4.16/jffs2_kill_sb-deal-with-failed-allocations.patch [new file with mode: 0644]
queue-4.16/libnvdimm-dimm-handle-eacces-failures-from-label-reads.patch [new file with mode: 0644]
queue-4.16/mm-filemap.c-fix-null-pointer-in-page_cache_tree_insert.patch [new file with mode: 0644]
queue-4.16/orangefs_kill_sb-deal-with-allocation-failures.patch [new file with mode: 0644]
queue-4.16/rpc_pipefs-fix-double-dput.patch [new file with mode: 0644]
queue-4.16/series

diff --git a/queue-4.16/autofs-mount-point-create-should-honour-passed-in-mode.patch b/queue-4.16/autofs-mount-point-create-should-honour-passed-in-mode.patch
new file mode 100644 (file)
index 0000000..a5a3fba
--- /dev/null
@@ -0,0 +1,40 @@
+From 1e6306652ba18723015d1b4967fe9de55f042499 Mon Sep 17 00:00:00 2001
+From: Ian Kent <raven@themaw.net>
+Date: Fri, 20 Apr 2018 14:55:59 -0700
+Subject: autofs: mount point create should honour passed in mode
+
+From: Ian Kent <raven@themaw.net>
+
+commit 1e6306652ba18723015d1b4967fe9de55f042499 upstream.
+
+The autofs file system mkdir inode operation blindly sets the created
+directory mode to S_IFDIR | 0555, ignoring the passed in mode, which can
+cause selinux dac_override denials.
+
+But the function also checks if the caller is the daemon (as no-one else
+should be able to do anything here) so there's no point in not honouring
+the passed in mode, allowing the daemon to set appropriate mode when
+required.
+
+Link: http://lkml.kernel.org/r/152361593601.8051.14014139124905996173.stgit@pluto.themaw.net
+Signed-off-by: Ian Kent <raven@themaw.net>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/autofs4/root.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inod
+       autofs4_del_active(dentry);
+-      inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
++      inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
+       if (!inode)
+               return -ENOMEM;
+       d_add(dentry, inode);
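
The practical effect, sketched below as a small userspace program (the autofs mount path and the 0750 mode are assumptions for illustration): a mkdir() issued by the automount daemon inside an autofs mount now keeps the requested mode instead of being forced to 0555.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	const char *path = "/mnt/autofs/example";	/* hypothetical autofs-managed path */
	struct stat st;

	if (mkdir(path, 0750) != 0) {
		perror("mkdir");
		return 1;
	}
	if (stat(path, &st) == 0)
		/* reports 750 with this patch (when the caller is the daemon),
		 * 555 without it */
		printf("mode: %o\n", (unsigned)(st.st_mode & 07777));
	return 0;
}
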
diff --git a/queue-4.16/device-dax-allow-map_sync-to-succeed.patch b/queue-4.16/device-dax-allow-map_sync-to-succeed.patch
new file mode 100644 (file)
index 0000000..8fbc39f
--- /dev/null
@@ -0,0 +1,44 @@
+From ef8423022324cf79bd1b41d8707c766461e7e555 Mon Sep 17 00:00:00 2001
+From: Dave Jiang <dave.jiang@intel.com>
+Date: Thu, 19 Apr 2018 13:39:43 -0700
+Subject: device-dax: allow MAP_SYNC to succeed
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+commit ef8423022324cf79bd1b41d8707c766461e7e555 upstream.
+
+MAP_SYNC is a nop for device-dax. Allow MAP_SYNC to succeed on device-dax
+to eliminate special casing between device-dax and fs-dax as to when the
+flag can be specified. Device-dax users already implicitly assume that they do
+not need to call fsync(), and this enables them to explicitly check for this
+capability.
+
+Cc: <stable@vger.kernel.org>
+Fixes: b6fb293f2497 ("mm: Define MAP_SYNC and VM_SYNC flags")
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dax/device.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/dax/device.c
++++ b/drivers/dax/device.c
+@@ -19,6 +19,7 @@
+ #include <linux/dax.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
++#include <linux/mman.h>
+ #include "dax-private.h"
+ #include "dax.h"
+@@ -534,6 +535,7 @@ static const struct file_operations dax_
+       .release = dax_release,
+       .get_unmapped_area = dax_get_unmapped_area,
+       .mmap = dax_mmap,
++      .mmap_supported_flags = MAP_SYNC,
+ };
+ static void dev_dax_release(struct device *dev)
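
A minimal userspace sketch of what this enables (the device path and mapping size are assumptions; MAP_SYNC only has a defined failure mode when combined with MAP_SHARED_VALIDATE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SYNC
#define MAP_SYNC 0x080000		/* from <linux/mman.h> on newer toolchains */
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif

int main(void)
{
	int fd = open("/dev/dax0.0", O_RDWR);	/* assumed device-dax node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap(MAP_SYNC)");	/* rejected before this patch */
	else
		puts("MAP_SYNC accepted: no fsync() needed for persistence");

	close(fd);
	return 0;
}
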
diff --git a/queue-4.16/don-t-leak-mnt_internal-away-from-internal-mounts.patch b/queue-4.16/don-t-leak-mnt_internal-away-from-internal-mounts.patch
new file mode 100644 (file)
index 0000000..48cb692
--- /dev/null
@@ -0,0 +1,37 @@
+From 16a34adb9392b2fe4195267475ab5b472e55292c Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 19 Apr 2018 22:03:08 -0400
+Subject: Don't leak MNT_INTERNAL away from internal mounts
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 16a34adb9392b2fe4195267475ab5b472e55292c upstream.
+
+We want it only for the stuff created by SB_KERNMOUNT mounts, *not* for
+their copies.  As it is, creating a deep stack of bindings of /proc/*/ns/*
+somewhere in a new namespace and exiting yields a stack overflow.
+
+Cc: stable@kernel.org
+Reported-by: Alexander Aring <aring@mojatatu.com>
+Bisected-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Tested-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Tested-by: Alexander Aring <aring@mojatatu.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namespace.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mo
+                       goto out_free;
+       }
+-      mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
++      mnt->mnt.mnt_flags = old->mnt.mnt_flags;
++      mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+       /* Don't allow unprivileged users to change mount flags */
+       if (flag & CL_UNPRIVILEGED) {
+               mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
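
For context, a rough reproduction sketch of the scenario described above (assumptions: enough privileges for unshare(CLONE_NEWNS), and /tmp/nsfile is a pre-created regular file to bind the nsfs file onto):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWNS) != 0) {
		perror("unshare");
		return 1;
	}
	/* keep the bind mounts private to this namespace */
	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);

	/* each bind of /proc/self/ns/mnt stacks another nsfs mount; before
	 * the fix the copies wrongly kept MNT_INTERNAL, and tearing the
	 * namespace down on exit could overflow the kernel stack */
	for (int i = 0; i < (1 << 17); i++)
		if (mount("/proc/self/ns/mnt", "/tmp/nsfile", NULL,
			  MS_BIND, NULL) != 0) {
			perror("mount");
			break;
		}
	return 0;
}
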
diff --git a/queue-4.16/drm-i915-correctly-handle-limited-range-ycbcr-data-on-vlv-chv.patch b/queue-4.16/drm-i915-correctly-handle-limited-range-ycbcr-data-on-vlv-chv.patch
new file mode 100644 (file)
index 0000000..a1db231
--- /dev/null
@@ -0,0 +1,213 @@
+From 5deae9191130db6b617c94fb261804597cf9b508 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 14 Feb 2018 21:23:23 +0200
+Subject: drm/i915: Correctly handle limited range YCbCr data on VLV/CHV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 5deae9191130db6b617c94fb261804597cf9b508 upstream.
+
+Turns out the VLV/CHV fixed function sprite CSC expects full range
+data as input. We've been feeding it limited range data to it all
+along. To expand the data out to full range we'll use the color
+correction registers (brightness, contrast, and saturation).
+
+On CHV pipe B we were actually doing the right thing already because we
+programmed the custom CSC matrix to expect limited range input. Now
+that we'll pre-expand the data out with the color correction unit, we
+need to change the CSC matrix to operate with full range input instead.
+
+This should make the sprite output of the other pipes match the sprite
+output of pipe B reasonably well. Looking at the resulting pipe CRCs,
+there can be a slight difference in the output, but as I don't know
+the formula used by the fixed function CSC of the other pipes, I don't
+think it's worth the effort to try to match the output exactly. It
+might not even be possible due to difference in internal precision etc.
+
+One slight caveat here is that the color correction registers are single
+buffered, so we should really be updating them during vblank, but we
+still don't have a mechanism for that, so just toss in another FIXME.
+
+v2: Rebase
+v3: s/bri/brightness/ s/con/contrast/ (Shashank)
+v4: Clarify the constants and math (Shashank)
+
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Daniel Stone <daniel@fooishbar.org>
+Cc: Russell King - ARM Linux <linux@armlinux.org.uk>
+Cc: Ilia Mirkin <imirkin@alum.mit.edu>
+Cc: Hans Verkuil <hverkuil@xs4all.nl>
+Cc: Shashank Sharma <shashank.sharma@intel.com>
+Cc: Uma Shankar <uma.shankar@intel.com>
+Cc: Jyri Sarha <jsarha@ti.com>
+Cc: "Tang, Jun" <jun.tang@intel.com>
+Reported-by: "Tang, Jun" <jun.tang@intel.com>
+Cc: stable@vger.kernel.org
+Fixes: 7f1f3851feb0 ("drm/i915: sprite support for ValleyView v4")
+Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180214192327.3250-5-ville.syrjala@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_reg.h     |   10 ++++
+ drivers/gpu/drm/i915/intel_sprite.c |   83 +++++++++++++++++++++++++++---------
+ 2 files changed, 74 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6236,6 +6236,12 @@ enum {
+ #define _SPATILEOFF           (VLV_DISPLAY_BASE + 0x721a4)
+ #define _SPACONSTALPHA                (VLV_DISPLAY_BASE + 0x721a8)
+ #define   SP_CONST_ALPHA_ENABLE               (1<<31)
++#define _SPACLRC0             (VLV_DISPLAY_BASE + 0x721d0)
++#define   SP_CONTRAST(x)              ((x) << 18) /* u3.6 */
++#define   SP_BRIGHTNESS(x)            ((x) & 0xff) /* s8 */
++#define _SPACLRC1             (VLV_DISPLAY_BASE + 0x721d4)
++#define   SP_SH_SIN(x)                        (((x) & 0x7ff) << 16) /* s4.7 */
++#define   SP_SH_COS(x)                        (x) /* u3.7 */
+ #define _SPAGAMC              (VLV_DISPLAY_BASE + 0x721f4)
+ #define _SPBCNTR              (VLV_DISPLAY_BASE + 0x72280)
+@@ -6249,6 +6255,8 @@ enum {
+ #define _SPBKEYMAXVAL         (VLV_DISPLAY_BASE + 0x722a0)
+ #define _SPBTILEOFF           (VLV_DISPLAY_BASE + 0x722a4)
+ #define _SPBCONSTALPHA                (VLV_DISPLAY_BASE + 0x722a8)
++#define _SPBCLRC0             (VLV_DISPLAY_BASE + 0x722d0)
++#define _SPBCLRC1             (VLV_DISPLAY_BASE + 0x722d4)
+ #define _SPBGAMC              (VLV_DISPLAY_BASE + 0x722f4)
+ #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+@@ -6265,6 +6273,8 @@ enum {
+ #define SPKEYMAXVAL(pipe, plane_id)   _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+ #define SPTILEOFF(pipe, plane_id)     _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+ #define SPCONSTALPHA(pipe, plane_id)  _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
++#define SPCLRC0(pipe, plane_id)               _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
++#define SPCLRC1(pipe, plane_id)               _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
+ #define SPGAMC(pipe, plane_id)                _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+ /*
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
+@@ -346,44 +346,87 @@ skl_plane_get_hw_state(struct intel_plan
+ }
+ static void
+-chv_update_csc(struct intel_plane *plane, uint32_t format)
++chv_update_csc(const struct intel_plane_state *plane_state)
+ {
++      struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++      const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum plane_id plane_id = plane->id;
+       /* Seems RGB data bypasses the CSC always */
+-      if (!format_is_yuv(format))
++      if (!format_is_yuv(fb->format->format))
+               return;
+       /*
+-       * BT.601 limited range YCbCr -> full range RGB
++       * BT.601 full range YCbCr -> full range RGB
+        *
+-       * |r|   | 6537 4769     0|   |cr  |
+-       * |g| = |-3330 4769 -1605| x |y-64|
+-       * |b|   |    0 4769  8263|   |cb  |
++       * |r|   | 5743 4096     0|   |cr|
++       * |g| = |-2925 4096 -1410| x |y |
++       * |b|   |    0 4096  7258|   |cb|
+        *
+-       * Cb and Cr apparently come in as signed already, so no
+-       * need for any offset. For Y we need to remove the offset.
++       * Cb and Cr apparently come in as signed already,
++       * and we get full range data in on account of CLRC0/1
+        */
+-      I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
++      I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+       I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+       I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+-      I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+-      I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+-      I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+-      I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+-      I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
+-
+-      I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+-      I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+-      I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
++      I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4096) | SPCSC_C0(5743));
++      I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-2925) | SPCSC_C0(0));
++      I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1410) | SPCSC_C0(4096));
++      I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4096) | SPCSC_C0(0));
++      I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(7258));
++
++      I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
++      I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
++      I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+       I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+       I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ }
++#define SIN_0 0
++#define COS_0 1
++
++static void
++vlv_update_clrc(const struct intel_plane_state *plane_state)
++{
++      struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
++      struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++      const struct drm_framebuffer *fb = plane_state->base.fb;
++      enum pipe pipe = plane->pipe;
++      enum plane_id plane_id = plane->id;
++      int contrast, brightness, sh_scale, sh_sin, sh_cos;
++
++      if (format_is_yuv(fb->format->format)) {
++              /*
++               * Expand limited range to full range:
++               * Contrast is applied first and is used to expand Y range.
++               * Brightness is applied second and is used to remove the
++               * offset from Y. Saturation/hue is used to expand CbCr range.
++               */
++              contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
++              brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
++              sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
++              sh_sin = SIN_0 * sh_scale;
++              sh_cos = COS_0 * sh_scale;
++      } else {
++              /* Pass-through everything. */
++              contrast = 1 << 6;
++              brightness = 0;
++              sh_scale = 1 << 7;
++              sh_sin = SIN_0 * sh_scale;
++              sh_cos = COS_0 * sh_scale;
++      }
++
++      /* FIXME these register are single buffered :( */
++      I915_WRITE_FW(SPCLRC0(pipe, plane_id),
++                    SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
++      I915_WRITE_FW(SPCLRC1(pipe, plane_id),
++                    SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
++}
++
+ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *plane_state)
+ {
+@@ -477,8 +520,10 @@ vlv_update_plane(struct intel_plane *pla
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
++      vlv_update_clrc(plane_state);
++
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+-              chv_update_csc(plane, fb->format->format);
++              chv_update_csc(plane_state);
+       if (key->flags) {
+               I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
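
For reference, a standalone sketch of the fixed-point values vlv_update_clrc() ends up programming for YUV input, using the same formulas as the patch (the DIV_ROUND_CLOSEST macro below is a simplified positive-only stand-in for the kernel's):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* limited-to-full-range expansion for BT.601 YCbCr */
	int contrast   = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);	/* u3.6: 75  ~  255/219 */
	int brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);	/* s8:  -19  ~  -16*255/219 */
	int sh_scale   = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);	/* u3.7: 146 ~  128/112 */

	printf("contrast=%d brightness=%d sh_scale=%d\n",
	       contrast, brightness, sh_scale);
	return 0;
}
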
diff --git a/queue-4.16/drm-i915-fix-hibernation-with-acpi-s0-target-state.patch b/queue-4.16/drm-i915-fix-hibernation-with-acpi-s0-target-state.patch
new file mode 100644 (file)
index 0000000..d5e437a
--- /dev/null
@@ -0,0 +1,154 @@
+From 300efa9eea451bdcf3b5a1eb292222e06e85bb2c Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 22 Mar 2018 16:36:42 +0200
+Subject: drm/i915: Fix hibernation with ACPI S0 target state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 300efa9eea451bdcf3b5a1eb292222e06e85bb2c upstream.
+
+After
+
+commit dd9f31c7a3887950cbd0d49eb9d43f7a1518a356
+Author: Imre Deak <imre.deak@intel.com>
+Date:   Wed Aug 16 17:46:07 2017 +0300
+
+    drm/i915/gen9+: Set same power state before hibernation image
+    save/restore
+
+during hibernation/suspend the power domain functionality got disabled,
+after which resume could leave it incorrectly disabled if the ACPI
+target state was S0 during suspend and i915 was not loaded by the loader
+kernel.
+
+This was caused by not taking into account whether we resumed from
+hibernation when deciding whether to reinit the power domains.
+
+Fix this by simply tracking if we suspended power domains during system
+suspend and reinit power domains accordingly during resume. This will
+result in reiniting power domains always when resuming from hibernation,
+regardless of the platform and whether or not i915 is loaded by the
+loader kernel.
+
+The reason we didn't catch this earlier is that the enabled/disabled
+state of power domains during PMSG_FREEZE/PMSG_QUIESCE is platform
+and kernel config dependent: on my SKL the target state is S4
+during PMSG_FREEZE and (with the driver loaded in the loader kernel)
+S0 during PMSG_QUIESCE. On the reporter's machine it's S0 during
+PMSG_FREEZE but (contrary to this) power domains are not initialized
+during PMSG_QUIESCE since i915 is not loaded in the loader kernel, or
+it's loaded but without the DMC firmware being available.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105196
+Reported-and-tested-by: amn-bas@hotmail.com
+Fixes: dd9f31c7a388 ("drm/i915/gen9+: Set same power state before hibernation image save/restore")
+Cc: amn-bas@hotmail.com
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180322143642.26883-1-imre.deak@intel.com
+(cherry picked from commit 0f90603c33bdf6575cfdc81edd53f3f13ba166fb)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_drv.c |   22 ++++++++++------------
+ drivers/gpu/drm/i915/i915_drv.h |    2 +-
+ 2 files changed, 11 insertions(+), 13 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1599,15 +1599,12 @@ static int i915_drm_suspend_late(struct
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+-      bool fw_csr;
+       int ret;
+       disable_rpm_wakeref_asserts(dev_priv);
+       intel_display_set_init_power(dev_priv, false);
+-      fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
+-              suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+       /*
+        * In case of firmware assisted context save/restore don't manually
+        * deinit the power domains. This also means the CSR/DMC firmware will
+@@ -1615,8 +1612,11 @@ static int i915_drm_suspend_late(struct
+        * also enable deeper system power states that would be blocked if the
+        * firmware was inactive.
+        */
+-      if (!fw_csr)
++      if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
++          dev_priv->csr.dmc_payload == NULL) {
+               intel_power_domains_suspend(dev_priv);
++              dev_priv->power_domains_suspended = true;
++      }
+       ret = 0;
+       if (IS_GEN9_LP(dev_priv))
+@@ -1628,8 +1628,10 @@ static int i915_drm_suspend_late(struct
+       if (ret) {
+               DRM_ERROR("Suspend complete failed: %d\n", ret);
+-              if (!fw_csr)
++              if (dev_priv->power_domains_suspended) {
+                       intel_power_domains_init_hw(dev_priv, true);
++                      dev_priv->power_domains_suspended = false;
++              }
+               goto out;
+       }
+@@ -1650,8 +1652,6 @@ static int i915_drm_suspend_late(struct
+       if (!(hibernation && INTEL_GEN(dev_priv) < 6))
+               pci_set_power_state(pdev, PCI_D3hot);
+-      dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
+-
+ out:
+       enable_rpm_wakeref_asserts(dev_priv);
+@@ -1818,8 +1818,7 @@ static int i915_drm_resume_early(struct
+       intel_uncore_resume_early(dev_priv);
+       if (IS_GEN9_LP(dev_priv)) {
+-              if (!dev_priv->suspended_to_idle)
+-                      gen9_sanitize_dc_state(dev_priv);
++              gen9_sanitize_dc_state(dev_priv);
+               bxt_disable_dc9(dev_priv);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               hsw_disable_pc8(dev_priv);
+@@ -1827,8 +1826,7 @@ static int i915_drm_resume_early(struct
+       intel_uncore_sanitize(dev_priv);
+-      if (IS_GEN9_LP(dev_priv) ||
+-          !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
++      if (dev_priv->power_domains_suspended)
+               intel_power_domains_init_hw(dev_priv, true);
+       else
+               intel_display_set_init_power(dev_priv, true);
+@@ -1838,7 +1836,7 @@ static int i915_drm_resume_early(struct
+       enable_rpm_wakeref_asserts(dev_priv);
+ out:
+-      dev_priv->suspended_to_idle = false;
++      dev_priv->power_domains_suspended = false;
+       return ret;
+ }
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2099,7 +2099,7 @@ struct drm_i915_private {
+       u32 bxt_phy_grc;
+       u32 suspend_count;
+-      bool suspended_to_idle;
++      bool power_domains_suspended;
+       struct i915_suspend_saved_registers regfile;
+       struct vlv_s0ix_state vlv_s0ix_state;
diff --git a/queue-4.16/drm-i915-gvt-init-mmio-by-lri-command-in-vgpu-inhibit-context.patch b/queue-4.16/drm-i915-gvt-init-mmio-by-lri-command-in-vgpu-inhibit-context.patch
new file mode 100644 (file)
index 0000000..205979c
--- /dev/null
@@ -0,0 +1,390 @@
+From cd7e61b93d068a80bfe6cb55bf00f17332d831a1 Mon Sep 17 00:00:00 2001
+From: Weinan Li <weinan.z.li@intel.com>
+Date: Fri, 23 Feb 2018 14:46:45 +0800
+Subject: drm/i915/gvt: init mmio by lri command in vgpu inhibit context
+
+From: Weinan Li <weinan.z.li@intel.com>
+
+commit cd7e61b93d068a80bfe6cb55bf00f17332d831a1 upstream.
+
+There is an issue related to Coarse Power Gating (CPG) on KBL NUC in GVT-g:
+the vgpu can't get the correct default context by updating the registers before
+inhibit context submission. It always gets back the hardware default value
+unless the inhibit context submission happens before the first
+forcewake put. With this wrong default context, the vgpu runs with
+incorrect state and hits unknown issues.
+
+The solution is to initialize these mmios by adding lri commands to the ring
+buffer of the inhibit context; the gpu hardware then has no chance to go down
+to RC6 while the lri commands are being executed, and the vgpu can get the
+correct default context for further use.
+
+v3:
+- fix code fault, use 'for' to loop through mmio render list(Zhenyu)
+
+v4:
+- save the count of engine mmio need to be restored for inhibit context and
+  refine some comments. (Kevin)
+
+v5:
+- code rebase
+
+Cc: Kevin Tian <kevin.tian@intel.com>
+Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Weinan Li <weinan.z.li@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Changbin Du <changbin.du@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gvt/gvt.h          |    5 
+ drivers/gpu/drm/i915/gvt/mmio_context.c |  210 +++++++++++++++++++++++++++++---
+ drivers/gpu/drm/i915/gvt/mmio_context.h |    5 
+ drivers/gpu/drm/i915/gvt/scheduler.c    |    5 
+ 4 files changed, 205 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/gvt.h
++++ b/drivers/gpu/drm/i915/gvt/gvt.h
+@@ -308,7 +308,10 @@ struct intel_gvt {
+       wait_queue_head_t service_thread_wq;
+       unsigned long service_request;
+-      struct engine_mmio *engine_mmio_list;
++      struct {
++              struct engine_mmio *mmio;
++              int ctx_mmio_count[I915_NUM_ENGINES];
++      } engine_mmio_list;
+       struct dentry *debugfs_root;
+ };
+--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
++++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
+@@ -50,6 +50,8 @@
+ #define RING_GFX_MODE(base)   _MMIO((base) + 0x29c)
+ #define VF_GUARDBAND          _MMIO(0x83a4)
++#define GEN9_MOCS_SIZE                64
++
+ /* Raw offset is appened to each line for convenience. */
+ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
+       {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+@@ -152,8 +154,8 @@ static struct engine_mmio gen9_engine_mm
+ static struct {
+       bool initialized;
+-      u32 control_table[I915_NUM_ENGINES][64];
+-      u32 l3cc_table[32];
++      u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
++      u32 l3cc_table[GEN9_MOCS_SIZE / 2];
+ } gen9_render_mocs;
+ static void load_render_mocs(struct drm_i915_private *dev_priv)
+@@ -170,7 +172,7 @@ static void load_render_mocs(struct drm_
+       for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+               offset.reg = regs[ring_id];
+-              for (i = 0; i < 64; i++) {
++              for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+                       gen9_render_mocs.control_table[ring_id][i] =
+                               I915_READ_FW(offset);
+                       offset.reg += 4;
+@@ -178,7 +180,7 @@ static void load_render_mocs(struct drm_
+       }
+       offset.reg = 0xb020;
+-      for (i = 0; i < 32; i++) {
++      for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+               gen9_render_mocs.l3cc_table[i] =
+                       I915_READ_FW(offset);
+               offset.reg += 4;
+@@ -186,6 +188,153 @@ static void load_render_mocs(struct drm_
+       gen9_render_mocs.initialized = true;
+ }
++static int
++restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
++                               struct drm_i915_gem_request *req)
++{
++      u32 *cs;
++      int ret;
++      struct engine_mmio *mmio;
++      struct intel_gvt *gvt = vgpu->gvt;
++      int ring_id = req->engine->id;
++      int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
++
++      if (count == 0)
++              return 0;
++
++      ret = req->engine->emit_flush(req, EMIT_BARRIER);
++      if (ret)
++              return ret;
++
++      cs = intel_ring_begin(req, count * 2 + 2);
++      if (IS_ERR(cs))
++              return PTR_ERR(cs);
++
++      *cs++ = MI_LOAD_REGISTER_IMM(count);
++      for (mmio = gvt->engine_mmio_list.mmio;
++           i915_mmio_reg_valid(mmio->reg); mmio++) {
++              if (mmio->ring_id != ring_id ||
++                  !mmio->in_context)
++                      continue;
++
++              *cs++ = i915_mmio_reg_offset(mmio->reg);
++              *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
++                              (mmio->mask << 16);
++              gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++                            *(cs-2), *(cs-1), vgpu->id, ring_id);
++      }
++
++      *cs++ = MI_NOOP;
++      intel_ring_advance(req, cs);
++
++      ret = req->engine->emit_flush(req, EMIT_BARRIER);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++static int
++restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
++                                      struct drm_i915_gem_request *req)
++{
++      unsigned int index;
++      u32 *cs;
++
++      cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
++      if (IS_ERR(cs))
++              return PTR_ERR(cs);
++
++      *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
++
++      for (index = 0; index < GEN9_MOCS_SIZE; index++) {
++              *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
++              *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
++              gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++                            *(cs-2), *(cs-1), vgpu->id, req->engine->id);
++
++      }
++
++      *cs++ = MI_NOOP;
++      intel_ring_advance(req, cs);
++
++      return 0;
++}
++
++static int
++restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
++                                   struct drm_i915_gem_request *req)
++{
++      unsigned int index;
++      u32 *cs;
++
++      cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
++      if (IS_ERR(cs))
++              return PTR_ERR(cs);
++
++      *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
++
++      for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
++              *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
++              *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
++              gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++                            *(cs-2), *(cs-1), vgpu->id, req->engine->id);
++
++      }
++
++      *cs++ = MI_NOOP;
++      intel_ring_advance(req, cs);
++
++      return 0;
++}
++
++/*
++ * Use lri command to initialize the mmio which is in context state image for
++ * inhibit context, it contains tracked engine mmio, render_mocs and
++ * render_mocs_l3cc.
++ */
++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
++                                     struct drm_i915_gem_request *req)
++{
++      int ret;
++      u32 *cs;
++
++      cs = intel_ring_begin(req, 2);
++      if (IS_ERR(cs))
++              return PTR_ERR(cs);
++
++      *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++      *cs++ = MI_NOOP;
++      intel_ring_advance(req, cs);
++
++      ret = restore_context_mmio_for_inhibit(vgpu, req);
++      if (ret)
++              goto out;
++
++      /* no MOCS register in context except render engine */
++      if (req->engine->id != RCS)
++              goto out;
++
++      ret = restore_render_mocs_control_for_inhibit(vgpu, req);
++      if (ret)
++              goto out;
++
++      ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
++      if (ret)
++              goto out;
++
++out:
++      cs = intel_ring_begin(req, 2);
++      if (IS_ERR(cs))
++              return PTR_ERR(cs);
++
++      *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
++      *cs++ = MI_NOOP;
++      intel_ring_advance(req, cs);
++
++      return ret;
++}
++
+ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+ {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+@@ -252,11 +401,14 @@ static void switch_mocs(struct intel_vgp
+       if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+               return;
++      if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
++              return;
++
+       if (!pre && !gen9_render_mocs.initialized)
+               load_render_mocs(dev_priv);
+       offset.reg = regs[ring_id];
+-      for (i = 0; i < 64; i++) {
++      for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+               if (pre)
+                       old_v = vgpu_vreg_t(pre, offset);
+               else
+@@ -274,7 +426,7 @@ static void switch_mocs(struct intel_vgp
+       if (ring_id == RCS) {
+               l3_offset.reg = 0xb020;
+-              for (i = 0; i < 32; i++) {
++              for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+                       if (pre)
+                               old_v = vgpu_vreg_t(pre, l3_offset);
+                       else
+@@ -294,6 +446,16 @@ static void switch_mocs(struct intel_vgp
+ #define CTX_CONTEXT_CONTROL_VAL       0x03
++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
++{
++      u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
++      u32 inhibit_mask =
++              _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
++
++      return inhibit_mask ==
++              (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
++}
++
+ /* Switch ring mmio values (context). */
+ static void switch_mmio(struct intel_vgpu *pre,
+                       struct intel_vgpu *next,
+@@ -301,9 +463,6 @@ static void switch_mmio(struct intel_vgp
+ {
+       struct drm_i915_private *dev_priv;
+       struct intel_vgpu_submission *s;
+-      u32 *reg_state, ctx_ctrl;
+-      u32 inhibit_mask =
+-              _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+       struct engine_mmio *mmio;
+       u32 old_v, new_v;
+@@ -311,10 +470,18 @@ static void switch_mmio(struct intel_vgp
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               switch_mocs(pre, next, ring_id);
+-      for (mmio = dev_priv->gvt->engine_mmio_list;
++      for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+            i915_mmio_reg_valid(mmio->reg); mmio++) {
+               if (mmio->ring_id != ring_id)
+                       continue;
++              /*
++               * No need to do save or restore of the mmio which is in context
++               * state image on kabylake, it's initialized by lri command and
++               * save or restore with context together.
++               */
++              if (IS_KABYLAKE(dev_priv) && mmio->in_context)
++                      continue;
++
+               // save
+               if (pre) {
+                       vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+@@ -328,16 +495,13 @@ static void switch_mmio(struct intel_vgp
+               // restore
+               if (next) {
+                       s = &next->submission;
+-                      reg_state =
+-                              s->shadow_ctx->engine[ring_id].lrc_reg_state;
+-                      ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+                       /*
+-                       * if it is an inhibit context, load in_context mmio
+-                       * into HW by mmio write. If it is not, skip this mmio
+-                       * write.
++                       * No need to restore the mmio which is in context state
++                       * image if it's not inhibit context, it will restore
++                       * itself.
+                        */
+                       if (mmio->in_context &&
+-                          (ctx_ctrl & inhibit_mask) != inhibit_mask)
++                          !is_inhibit_context(s->shadow_ctx, ring_id))
+                               continue;
+                       if (mmio->mask)
+@@ -408,8 +572,16 @@ void intel_gvt_switch_mmio(struct intel_
+  */
+ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+ {
++      struct engine_mmio *mmio;
++
+       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+-              gvt->engine_mmio_list = gen9_engine_mmio_list;
++              gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
+       else
+-              gvt->engine_mmio_list = gen8_engine_mmio_list;
++              gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
++
++      for (mmio = gvt->engine_mmio_list.mmio;
++           i915_mmio_reg_valid(mmio->reg); mmio++) {
++              if (mmio->in_context)
++                      gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
++      }
+ }
+--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
++++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
+@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_
+ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
++
++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
++                                     struct drm_i915_gem_request *req);
++
+ #endif
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -275,6 +275,11 @@ static int copy_workload_to_ring_buffer(
+       struct intel_vgpu *vgpu = workload->vgpu;
+       void *shadow_ring_buffer_va;
+       u32 *cs;
++      struct drm_i915_gem_request *req = workload->req;
++
++      if (IS_KABYLAKE(req->i915) &&
++          is_inhibit_context(req->ctx, req->engine->id))
++              intel_vgpu_restore_inhibit_context(vgpu, req);
+       /* allocate shadow ring buffer */
+       cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
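
As a quick sanity check on the ring-space reservations in the patch above, a standalone sketch (not i915 code) of the dword budget per LRI batch: one MI_LOAD_REGISTER_IMM header, an (offset, value) pair per register, and a trailing MI_NOOP; the 10-register case is only an example count.

#include <stdio.h>

/* matches the count * 2 + 2 passed to intel_ring_begin() in the patch */
static unsigned int lri_dwords(unsigned int count)
{
	return count * 2 + 2;
}

int main(void)
{
	printf("engine ctx mmio, 10 regs: %u dwords\n", lri_dwords(10));	/* 22  */
	printf("render MOCS,     64 regs: %u dwords\n", lri_dwords(64));	/* 130 */
	printf("l3cc MOCS,       32 regs: %u dwords\n", lri_dwords(32));	/* 66  */
	return 0;
}
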
diff --git a/queue-4.16/hypfs_kill_super-deal-with-failed-allocations.patch b/queue-4.16/hypfs_kill_super-deal-with-failed-allocations.patch
new file mode 100644 (file)
index 0000000..52cbb81
--- /dev/null
@@ -0,0 +1,31 @@
+From a24cd490739586a7d2da3549a1844e1d7c4f4fc4 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Mon, 2 Apr 2018 23:50:31 -0400
+Subject: hypfs_kill_super(): deal with failed allocations
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit a24cd490739586a7d2da3549a1844e1d7c4f4fc4 upstream.
+
+hypfs_fill_super() might fail to allocate sbi; hypfs_kill_super()
+should not oops on that.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/hypfs/inode.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -320,7 +320,7 @@ static void hypfs_kill_super(struct supe
+       if (sb->s_root)
+               hypfs_delete_tree(sb->s_root);
+-      if (sb_info->update_file)
++      if (sb_info && sb_info->update_file)
+               hypfs_remove(sb_info->update_file);
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
diff --git a/queue-4.16/jffs2_kill_sb-deal-with-failed-allocations.patch b/queue-4.16/jffs2_kill_sb-deal-with-failed-allocations.patch
new file mode 100644 (file)
index 0000000..6cec5f3
--- /dev/null
@@ -0,0 +1,31 @@
+From c66b23c2840446a82c389e4cb1a12eb2a71fa2e4 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Mon, 2 Apr 2018 23:56:44 -0400
+Subject: jffs2_kill_sb(): deal with failed allocations
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit c66b23c2840446a82c389e4cb1a12eb2a71fa2e4 upstream.
+
+jffs2_fill_super() might fail to allocate jffs2_sb_info;
+jffs2_kill_sb() must survive that.
+
+Cc: stable@kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jffs2/super.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -342,7 +342,7 @@ static void jffs2_put_super (struct supe
+ static void jffs2_kill_sb(struct super_block *sb)
+ {
+       struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+-      if (!sb_rdonly(sb))
++      if (c && !sb_rdonly(sb))
+               jffs2_stop_garbage_collect_thread(c);
+       kill_mtd_super(sb);
+       kfree(c);
diff --git a/queue-4.16/libnvdimm-dimm-handle-eacces-failures-from-label-reads.patch b/queue-4.16/libnvdimm-dimm-handle-eacces-failures-from-label-reads.patch
new file mode 100644 (file)
index 0000000..5ebaa3e
--- /dev/null
@@ -0,0 +1,94 @@
+From e7c5a571a8d6a266aee9ca3f3f26e5afe3717eca Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 9 Apr 2018 12:34:24 -0700
+Subject: libnvdimm, dimm: handle EACCES failures from label reads
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit e7c5a571a8d6a266aee9ca3f3f26e5afe3717eca upstream.
+
+The new support for the standard _LSR and _LSW methods neglected to also
+update the nvdimm_init_config_data() and nvdimm_set_config_data() to
+return the translated error code from failed commands. This precision is
+necessary because the locked status that was previously returned on
+ND_CMD_GET_CONFIG_SIZE commands is now returned on
+ND_CMD_{GET,SET}_CONFIG_DATA commands.
+
+If the kernel misses this indication it can inadvertently fall back to
+label-less mode when it should otherwise avoid all access to locked
+regions.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 4b27db7e26cd ("acpi, nfit: add support for the _LSI, _LSR, and...")
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/dimm_devs.c |   22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drv
+ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+ {
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
++      int rc = validate_dimm(ndd), cmd_rc = 0;
+       struct nd_cmd_get_config_data_hdr *cmd;
+       struct nvdimm_bus_descriptor *nd_desc;
+-      int rc = validate_dimm(ndd);
+       u32 max_cmd_size, config_size;
+       size_t offset;
+@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdim
+               cmd->in_offset = offset;
+               rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+                               ND_CMD_GET_CONFIG_DATA, cmd,
+-                              cmd->in_length + sizeof(*cmd), NULL);
+-              if (rc || cmd->status) {
+-                      rc = -ENXIO;
++                              cmd->in_length + sizeof(*cmd), &cmd_rc);
++              if (rc < 0)
++                      break;
++              if (cmd_rc < 0) {
++                      rc = cmd_rc;
+                       break;
+               }
+               memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdim
+ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
+               void *buf, size_t len)
+ {
+-      int rc = validate_dimm(ndd);
+       size_t max_cmd_size, buf_offset;
+       struct nd_cmd_set_config_hdr *cmd;
++      int rc = validate_dimm(ndd), cmd_rc = 0;
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm
+       for (buf_offset = 0; len; len -= cmd->in_length,
+                       buf_offset += cmd->in_length) {
+               size_t cmd_size;
+-              u32 *status;
+               cmd->in_offset = offset + buf_offset;
+               cmd->in_length = min(max_cmd_size, len);
+@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm
+               /* status is output in the last 4-bytes of the command buffer */
+               cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
+-              status = ((void *) cmd) + cmd_size - sizeof(u32);
+               rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+-                              ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
+-              if (rc || *status) {
+-                      rc = rc ? rc : -ENXIO;
++                              ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
++              if (rc < 0)
++                      break;
++              if (cmd_rc < 0) {
++                      rc = cmd_rc;
+                       break;
+               }
+       }
diff --git a/queue-4.16/mm-filemap.c-fix-null-pointer-in-page_cache_tree_insert.patch b/queue-4.16/mm-filemap.c-fix-null-pointer-in-page_cache_tree_insert.patch
new file mode 100644 (file)
index 0000000..4d80ec1
--- /dev/null
@@ -0,0 +1,80 @@
+From abc1be13fd113ddef5e2d807a466286b864caed3 Mon Sep 17 00:00:00 2001
+From: Matthew Wilcox <mawilcox@microsoft.com>
+Date: Fri, 20 Apr 2018 14:56:20 -0700
+Subject: mm/filemap.c: fix NULL pointer in page_cache_tree_insert()
+
+From: Matthew Wilcox <mawilcox@microsoft.com>
+
+commit abc1be13fd113ddef5e2d807a466286b864caed3 upstream.
+
+f2fs specifies the __GFP_ZERO flag for allocating some of its pages.
+Unfortunately, the page cache also uses the mapping's GFP flags for
+allocating radix tree nodes.  It always masked off the __GFP_HIGHMEM
+flag, and masks off __GFP_ZERO in some paths, but not all.  That causes
+radix tree nodes to be allocated with a NULL list_head, which causes
+backtraces like:
+
+  __list_del_entry+0x30/0xd0
+  list_lru_del+0xac/0x1ac
+  page_cache_tree_insert+0xd8/0x110
+
+The __GFP_DMA and __GFP_DMA32 flags would also be able to sneak through
+if they are ever used.  Fix them all by using GFP_RECLAIM_MASK at the
+innermost location, and remove it from earlier in the callchain.
+
+Link: http://lkml.kernel.org/r/20180411060320.14458-2-willy@infradead.org
+Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check")
+Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
+Reported-by: Chris Fries <cfries@google.com>
+Debugged-by: Minchan Kim <minchan@kernel.org>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/filemap.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -785,7 +785,7 @@ int replace_page_cache_page(struct page
+       VM_BUG_ON_PAGE(!PageLocked(new), new);
+       VM_BUG_ON_PAGE(new->mapping, new);
+-      error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
++      error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
+       if (!error) {
+               struct address_space *mapping = old->mapping;
+               void (*freepage)(struct page *);
+@@ -841,7 +841,7 @@ static int __add_to_page_cache_locked(st
+                       return error;
+       }
+-      error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
++      error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
+       if (error) {
+               if (!huge)
+                       mem_cgroup_cancel_charge(page, memcg, false);
+@@ -1584,8 +1584,7 @@ no_page:
+               if (fgp_flags & FGP_ACCESSED)
+                       __SetPageReferenced(page);
+-              err = add_to_page_cache_lru(page, mapping, offset,
+-                              gfp_mask & GFP_RECLAIM_MASK);
++              err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+               if (unlikely(err)) {
+                       put_page(page);
+                       page = NULL;
+@@ -2388,7 +2387,7 @@ static int page_cache_read(struct file *
+               if (!page)
+                       return -ENOMEM;
+-              ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
++              ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+               if (ret == 0)
+                       ret = mapping->a_ops->readpage(file, page);
+               else if (ret == -EEXIST)
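
To make the backtrace above concrete, a minimal userspace sketch (simplified stand-ins, not kernel code) of why a __GFP_ZERO'd radix tree node breaks __list_del_entry():

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's list primitives */
struct list_head { struct list_head *next, *prev; };

static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

static int list_del_would_crash(const struct list_head *e)
{
	/* __list_del_entry() dereferences e->next and e->prev; if the node
	 * was handed out zeroed instead of constructor-initialized, both
	 * are NULL and the kernel oopses */
	return e->next == NULL || e->prev == NULL;
}

int main(void)
{
	struct list_head ctor_node, zeroed_node;

	init_list_head(&ctor_node);			/* what the slab ctor does */
	memset(&zeroed_node, 0, sizeof(zeroed_node));	/* what __GFP_ZERO effectively does */

	printf("ctor-initialized node crashes? %d\n", list_del_would_crash(&ctor_node));
	printf("__GFP_ZERO'd node crashes?     %d\n", list_del_would_crash(&zeroed_node));
	return 0;
}
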
diff --git a/queue-4.16/orangefs_kill_sb-deal-with-allocation-failures.patch b/queue-4.16/orangefs_kill_sb-deal-with-allocation-failures.patch
new file mode 100644 (file)
index 0000000..2f6edce
--- /dev/null
@@ -0,0 +1,34 @@
+From 659038428cb43a66e3eff71e2c845c9de3611a98 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Tue, 3 Apr 2018 00:13:17 -0400
+Subject: orangefs_kill_sb(): deal with allocation failures
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 659038428cb43a66e3eff71e2c845c9de3611a98 upstream.
+
+orangefs_fill_sb() might've failed to allocate ORANGEFS_SB(s); don't
+oops in that case.
+
+Cc: stable@kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/orangefs/super.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block
+       /* provided sb cleanup */
+       kill_anon_super(sb);
++      if (!ORANGEFS_SB(sb)) {
++              mutex_lock(&orangefs_request_mutex);
++              mutex_unlock(&orangefs_request_mutex);
++              return;
++      }
+       /*
+        * issue the unmount to userspace to tell it to remove the
+        * dynamic mount info it has for this superblock
diff --git a/queue-4.16/rpc_pipefs-fix-double-dput.patch b/queue-4.16/rpc_pipefs-fix-double-dput.patch
new file mode 100644 (file)
index 0000000..9329856
--- /dev/null
@@ -0,0 +1,31 @@
+From 4a3877c4cedd95543f8726b0a98743ed8db0c0fb Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Tue, 3 Apr 2018 01:15:46 -0400
+Subject: rpc_pipefs: fix double-dput()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 4a3877c4cedd95543f8726b0a98743ed8db0c0fb upstream.
+
+If we ever hit rpc_gssd_dummy_depopulate(), the dentry passed to
+it has a refcount equal to 1.  __rpc_rmpipe() drops it, and the
+dput() done after that hits an already freed dentry.
+
+Cc: stable@kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/rpc_pipe.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry
+       struct dentry *clnt_dir = pipe_dentry->d_parent;
+       struct dentry *gssd_dir = clnt_dir->d_parent;
++      dget(pipe_dentry);
+       __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
+       __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
+       __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
diff --git a/queue-4.16/series b/queue-4.16/series
index 90c8d8831dfd5eab79a84e8ecb194d6a9e48bcb7..3abc9530a2a19e6920ac944b59c7d37ef51a517e 100644 (file)
@@ -180,3 +180,15 @@ powerpc-lib-fix-off-by-one-in-alternate-feature-patching.patch
 udf-fix-leak-of-utf-16-surrogates-into-encoded-strings.patch
 fanotify-fix-logic-of-events-on-child.patch
 mmc-sdhci-pci-only-do-amd-tuning-for-hs200.patch
+drm-i915-fix-hibernation-with-acpi-s0-target-state.patch
+drm-i915-correctly-handle-limited-range-ycbcr-data-on-vlv-chv.patch
+jffs2_kill_sb-deal-with-failed-allocations.patch
+hypfs_kill_super-deal-with-failed-allocations.patch
+orangefs_kill_sb-deal-with-allocation-failures.patch
+rpc_pipefs-fix-double-dput.patch
+don-t-leak-mnt_internal-away-from-internal-mounts.patch
+libnvdimm-dimm-handle-eacces-failures-from-label-reads.patch
+device-dax-allow-map_sync-to-succeed.patch
+autofs-mount-point-create-should-honour-passed-in-mode.patch
+mm-filemap.c-fix-null-pointer-in-page_cache_tree_insert.patch
+drm-i915-gvt-init-mmio-by-lri-command-in-vgpu-inhibit-context.patch