--- /dev/null
+From c09f12186d6b03b798832d95289af76495990192 Mon Sep 17 00:00:00 2001
+From: Vishal Verma <vishal.l.verma@intel.com>
+Date: Fri, 19 Aug 2016 14:40:58 -0600
+Subject: acpi, nfit: check for the correct event code in notifications
+
+From: Vishal Verma <vishal.l.verma@intel.com>
+
+commit c09f12186d6b03b798832d95289af76495990192 upstream.
+
+Commit 209851649dc4 "acpi: nfit: Add support for hot-add" added
+support for _FIT notifications, but it neglected to verify the
+notification event code matches the one in the ACPI spec for
+"NFIT Update". Currently there is only one code in the spec, but
+once additional codes are added, older kernels (without this fix)
+will misbehave by assuming all event notifications are for an
+NFIT Update.
+
+Fixes: 209851649dc4 ("acpi: nfit: Add support for hot-add")
+Cc: <linux-acpi@vger.kernel.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Reported-by: Linda Knippers <linda.knippers@hpe.com>
+Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/nfit/core.c | 3 +++
+ drivers/acpi/nfit/nfit.h | 4 ++++
+ 2 files changed, 7 insertions(+)
+
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -2689,6 +2689,9 @@ static void acpi_nfit_notify(struct acpi
+
+ dev_dbg(dev, "%s: event: %d\n", __func__, event);
+
++ if (event != NFIT_NOTIFY_UPDATE)
++ return;
++
+ device_lock(dev);
+ if (!dev->driver) {
+ /* dev->driver may be null if we're being removed */
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -78,6 +78,10 @@ enum {
+ NFIT_ARS_TIMEOUT = 90,
+ };
+
++enum nfit_root_notifiers {
++ NFIT_NOTIFY_UPDATE = 0x80,
++};
++
+ struct nfit_spa {
+ struct list_head list;
+ struct nd_region *nd_region;
--- /dev/null
+From a818101d7b92e76db2f9a597e4830734767473b9 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 9 Aug 2016 17:41:16 +0100
+Subject: cachefiles: Fix attempt to read i_blocks after deleting file [ver #2]
+
+From: David Howells <dhowells@redhat.com>
+
+commit a818101d7b92e76db2f9a597e4830734767473b9 upstream.
+
+A NULL-pointer dereference happens in cachefiles_mark_object_inactive()
+when it tries to read i_blocks so that it can tell the cachefilesd daemon
+how much space it's making available.
+
+The problem is that cachefiles_drop_object() calls
+cachefiles_mark_object_inactive() after calling cachefiles_delete_object()
+because the object being marked active staves off attempts to (re-)use the
+file at that filename until after it has been deleted. This means that
+d_inode is NULL by the time we come to try to access it.
+
+To fix the problem, have the caller of cachefiles_mark_object_inactive()
+supply the number of blocks freed up.
+
+Without this, the following oops may occur:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000098
+IP: [<ffffffffa06c5cc1>] cachefiles_mark_object_inactive+0x61/0xb0 [cachefiles]
+...
+CPU: 11 PID: 527 Comm: kworker/u64:4 Tainted: G I ------------ 3.10.0-470.el7.x86_64 #1
+Hardware name: Hewlett-Packard HP Z600 Workstation/0B54h, BIOS 786G4 v03.19 03/11/2011
+Workqueue: fscache_object fscache_object_work_func [fscache]
+task: ffff880035edaf10 ti: ffff8800b77c0000 task.ti: ffff8800b77c0000
+RIP: 0010:[<ffffffffa06c5cc1>] cachefiles_mark_object_inactive+0x61/0xb0 [cachefiles]
+RSP: 0018:ffff8800b77c3d70 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: ffff8800bf6cc400 RCX: 0000000000000034
+RDX: 0000000000000000 RSI: ffff880090ffc710 RDI: ffff8800bf761ef8
+RBP: ffff8800b77c3d88 R08: 2000000000000000 R09: 0090ffc710000000
+R10: ff51005d2ff1c400 R11: 0000000000000000 R12: ffff880090ffc600
+R13: ffff8800bf6cc520 R14: ffff8800bf6cc400 R15: ffff8800bf6cc498
+FS: 0000000000000000(0000) GS:ffff8800bb8c0000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+CR2: 0000000000000098 CR3: 00000000019ba000 CR4: 00000000000007e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Stack:
+ ffff880090ffc600 ffff8800bf6cc400 ffff8800867df140 ffff8800b77c3db0
+ ffffffffa06c48cb ffff880090ffc600 ffff880090ffc180 ffff880090ffc658
+ ffff8800b77c3df0 ffffffffa085d846 ffff8800a96b8150 ffff880090ffc600
+Call Trace:
+ [<ffffffffa06c48cb>] cachefiles_drop_object+0x6b/0xf0 [cachefiles]
+ [<ffffffffa085d846>] fscache_drop_object+0xd6/0x1e0 [fscache]
+ [<ffffffffa085d615>] fscache_object_work_func+0xa5/0x200 [fscache]
+ [<ffffffff810a605b>] process_one_work+0x17b/0x470
+ [<ffffffff810a6e96>] worker_thread+0x126/0x410
+ [<ffffffff810a6d70>] ? rescuer_thread+0x460/0x460
+ [<ffffffff810ae64f>] kthread+0xcf/0xe0
+ [<ffffffff810ae580>] ? kthread_create_on_node+0x140/0x140
+ [<ffffffff81695418>] ret_from_fork+0x58/0x90
+ [<ffffffff810ae580>] ? kthread_create_on_node+0x140/0x140
+
+The oopsing code shows:
+
+ callq 0xffffffff810af6a0 <wake_up_bit>
+ mov 0xf8(%r12),%rax
+ mov 0x30(%rax),%rax
+ mov 0x98(%rax),%rax <---- oops here
+ lock add %rax,0x130(%rbx)
+
+where this is:
+
+ d_backing_inode(object->dentry)->i_blocks
+
+Fixes: a5b3a80b899bda0f456f1246c4c5a1191ea01519 (CacheFiles: Provide read-and-reset release counters for cachefilesd)
+Reported-by: Jianhong Yin <jiyin@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Reviewed-by: Steve Dickson <steved@redhat.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cachefiles/interface.c | 8 +++++++-
+ fs/cachefiles/internal.h | 3 ++-
+ fs/cachefiles/namei.c | 8 ++++----
+ 3 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/fs/cachefiles/interface.c
++++ b/fs/cachefiles/interface.c
+@@ -253,6 +253,8 @@ static void cachefiles_drop_object(struc
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
++ struct inode *inode;
++ blkcnt_t i_blocks = 0;
+
+ ASSERT(_object);
+
+@@ -279,6 +281,10 @@ static void cachefiles_drop_object(struc
+ _object != cache->cache.fsdef
+ ) {
+ _debug("- retire object OBJ%x", object->fscache.debug_id);
++ inode = d_backing_inode(object->dentry);
++ if (inode)
++ i_blocks = inode->i_blocks;
++
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_delete_object(cache, object);
+ cachefiles_end_secure(cache, saved_cred);
+@@ -292,7 +298,7 @@ static void cachefiles_drop_object(struc
+
+ /* note that the object is now inactive */
+ if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+- cachefiles_mark_object_inactive(cache, object);
++ cachefiles_mark_object_inactive(cache, object, i_blocks);
+
+ dput(object->dentry);
+ object->dentry = NULL;
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -160,7 +160,8 @@ extern char *cachefiles_cook_key(const u
+ * namei.c
+ */
+ extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+- struct cachefiles_object *object);
++ struct cachefiles_object *object,
++ blkcnt_t i_blocks);
+ extern int cachefiles_delete_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object);
+ extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -261,10 +261,9 @@ requeue:
+ * Mark an object as being inactive.
+ */
+ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+- struct cachefiles_object *object)
++ struct cachefiles_object *object,
++ blkcnt_t i_blocks)
+ {
+- blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
+-
+ write_lock(&cache->active_lock);
+ rb_erase(&object->active_node, &cache->active_nodes);
+ clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+@@ -707,7 +706,8 @@ mark_active_timed_out:
+
+ check_error:
+ _debug("check error %d", ret);
+- cachefiles_mark_object_inactive(cache, object);
++ cachefiles_mark_object_inactive(
++ cache, object, d_backing_inode(object->dentry)->i_blocks);
+ release_dentry:
+ dput(object->dentry);
+ object->dentry = NULL;
--- /dev/null
+From a397ba829d7f8aff4c90af3704573a28ccd61a59 Mon Sep 17 00:00:00 2001
+From: Marcelo Cerri <marcelo.cerri@canonical.com>
+Date: Wed, 28 Sep 2016 13:42:09 -0300
+Subject: crypto: ghash-generic - move common definitions to a new header file
+
+From: Marcelo Cerri <marcelo.cerri@canonical.com>
+
+commit a397ba829d7f8aff4c90af3704573a28ccd61a59 upstream.
+
+Move common values and types used by ghash-generic to a new header file
+so drivers can directly use ghash-generic as a fallback implementation.
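+
+For illustration (a hedged sketch, not part of this patch), a driver
+can now request the generic implementation by name as its fallback and
+share the definitions from <crypto/ghash.h>:
+
+    /* "ghash-generic" is the cra_driver_name of this module */
+    struct crypto_shash *fb;
+
+    fb = crypto_alloc_shash("ghash-generic", 0,
+                            CRYPTO_ALG_NEED_FALLBACK);
+    if (IS_ERR(fb))
+        return PTR_ERR(fb);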
+
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ghash-generic.c | 13 +------------
+ include/crypto/ghash.h | 23 +++++++++++++++++++++++
+ 2 files changed, 24 insertions(+), 12 deletions(-)
+
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -14,24 +14,13 @@
+
+ #include <crypto/algapi.h>
+ #include <crypto/gf128mul.h>
++#include <crypto/ghash.h>
+ #include <crypto/internal/hash.h>
+ #include <linux/crypto.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+
+-#define GHASH_BLOCK_SIZE 16
+-#define GHASH_DIGEST_SIZE 16
+-
+-struct ghash_ctx {
+- struct gf128mul_4k *gf128;
+-};
+-
+-struct ghash_desc_ctx {
+- u8 buffer[GHASH_BLOCK_SIZE];
+- u32 bytes;
+-};
+-
+ static int ghash_init(struct shash_desc *desc)
+ {
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+--- /dev/null
++++ b/include/crypto/ghash.h
+@@ -0,0 +1,23 @@
++/*
++ * Common values for GHASH algorithms
++ */
++
++#ifndef __CRYPTO_GHASH_H__
++#define __CRYPTO_GHASH_H__
++
++#include <linux/types.h>
++#include <crypto/gf128mul.h>
++
++#define GHASH_BLOCK_SIZE 16
++#define GHASH_DIGEST_SIZE 16
++
++struct ghash_ctx {
++ struct gf128mul_4k *gf128;
++};
++
++struct ghash_desc_ctx {
++ u8 buffer[GHASH_BLOCK_SIZE];
++ u32 bytes;
++};
++
++#endif
--- /dev/null
+From 80da44c29d997e28c4442825f35f4ac339813877 Mon Sep 17 00:00:00 2001
+From: Marcelo Cerri <marcelo.cerri@canonical.com>
+Date: Wed, 28 Sep 2016 13:42:10 -0300
+Subject: crypto: vmx - Fix memory corruption caused by p8_ghash
+
+From: Marcelo Cerri <marcelo.cerri@canonical.com>
+
+commit 80da44c29d997e28c4442825f35f4ac339813877 upstream.
+
+This patch changes the p8_ghash driver to use ghash-generic as a fixed
+fallback implementation. This allows the correct value of descsize to be
+defined directly in its shash_alg structure and avoids problems with
+incorrect buffer sizes when its state is exported or imported.
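+
+As a hedged illustration of why a fixed descsize matters: callers size
+export/import state buffers from the advertised descsize, so it must
+also cover the fallback's descriptor state, e.g.:
+
+    /* sketch only; "tfm" and "desc" are assumed to exist */
+    char state[512]; /* must hold crypto_shash_descsize(tfm) bytes */
+
+    if (crypto_shash_descsize(tfm) > sizeof(state))
+        return -EINVAL;
+    crypto_shash_export(desc, state); /* copies descsize bytes out */
+    crypto_shash_import(desc, state); /* reads descsize bytes back */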
+
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/ghash.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -26,16 +26,13 @@
+ #include <linux/hardirq.h>
+ #include <asm/switch_to.h>
+ #include <crypto/aes.h>
++#include <crypto/ghash.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/internal/hash.h>
+ #include <crypto/b128ops.h>
+
+ #define IN_INTERRUPT in_interrupt()
+
+-#define GHASH_BLOCK_SIZE (16)
+-#define GHASH_DIGEST_SIZE (16)
+-#define GHASH_KEY_LEN (16)
+-
+ void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
+ void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
+ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+@@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx {
+
+ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+ {
+- const char *alg;
++ const char *alg = "ghash-generic";
+ struct crypto_shash *fallback;
+ struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- if (!(alg = crypto_tfm_alg_name(tfm))) {
+- printk(KERN_ERR "Failed to get algorithm name.\n");
+- return -ENOENT;
+- }
+-
+ fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR
+@@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct cryp
+ crypto_shash_set_flags(fallback,
+ crypto_shash_get_flags((struct crypto_shash
+ *) tfm));
+- ctx->fallback = fallback;
+
+- shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
+- + crypto_shash_descsize(fallback);
++ /* Check if the descsize defined in the algorithm is still enough. */
++ if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
++ + crypto_shash_descsize(fallback)) {
++ printk(KERN_ERR
++ "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
++ alg,
++ shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
++ crypto_shash_descsize(fallback));
++ return -EINVAL;
++ }
++ ctx->fallback = fallback;
+
+ return 0;
+ }
+@@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto
+ {
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+- if (keylen != GHASH_KEY_LEN)
++ if (keylen != GHASH_BLOCK_SIZE)
+ return -EINVAL;
+
+ preempt_disable();
+@@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = {
+ .update = p8_ghash_update,
+ .final = p8_ghash_final,
+ .setkey = p8_ghash_setkey,
+- .descsize = sizeof(struct p8_ghash_desc_ctx),
++ .descsize = sizeof(struct p8_ghash_desc_ctx)
++ + sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
--- /dev/null
+From 3a8db79889ce16930aff19b818f5b09651bb7644 Mon Sep 17 00:00:00 2001
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Date: Sat, 8 Oct 2016 10:14:37 -0300
+Subject: dlm: free workqueues after the connections
+
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+
+commit 3a8db79889ce16930aff19b818f5b09651bb7644 upstream.
+
+After backporting the commit ee44b4bc054a ("dlm: use sctp 1-to-1 API")
+series to a kernel with an older workqueue implementation that didn't
+use RCU yet, it was noticed that the workqueues are freed too early in
+dlm_lowcomms_stop(), as free_conn() will try to access that memory to
+cancel any queued works.
+
+This issue was introduced by commit 0d737a8cfd83: before that commit,
+no attempt was made to cancel the queued works, so the issue was not
+present.
+
+This patch fixes it by simply inverting the free order.
+
+Fixes: 0d737a8cfd83 ("dlm: fix race while closing connections")
+Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/dlm/lowcomms.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1656,16 +1656,12 @@ void dlm_lowcomms_stop(void)
+ mutex_lock(&connections_lock);
+ dlm_allow_conn = 0;
+ foreach_conn(stop_conn);
++ clean_writequeues();
++ foreach_conn(free_conn);
+ mutex_unlock(&connections_lock);
+
+ work_stop();
+
+- mutex_lock(&connections_lock);
+- clean_writequeues();
+-
+- foreach_conn(free_conn);
+-
+- mutex_unlock(&connections_lock);
+ kmem_cache_destroy(con_cache);
+ }
+
--- /dev/null
+From c2cbc38b9715bd8318062e600668fc30e5a3fbfa Mon Sep 17 00:00:00 2001
+From: Laszlo Ersek <lersek@redhat.com>
+Date: Mon, 3 Oct 2016 19:43:03 +0200
+Subject: drm: virtio: reinstate drm_virtio_set_busid()
+
+From: Laszlo Ersek <lersek@redhat.com>
+
+commit c2cbc38b9715bd8318062e600668fc30e5a3fbfa upstream.
+
+Before commit a325725633c2 ("drm: Lobotomize set_busid nonsense for !pci
+drivers"), several DRM drivers for platform devices used to expose an
+explicit "drm_driver.set_busid" callback, invariably backed by
+drm_platform_set_busid().
+
+Commit a325725633c2 removed drm_platform_set_busid(), along with the
+referring .set_busid field initializations. This was justified because
+interchangeable functionality had been implemented in drm_dev_alloc() /
+drm_dev_init(), which DRM_IOCTL_SET_VERSION would rely on going forward.
+
+However, commit a325725633c2 also removed drm_virtio_set_busid(), for
+which the same consolidation was not appropriate: this .set_busid callback
+had been implemented with drm_pci_set_busid(), and not
+drm_platform_set_busid(). The error regressed Xorg/xserver on QEMU's
+"virtio-vga" card; the drmGetBusid() function from libdrm would no longer
+return stable PCI identifiers like "pci:0000:00:02.0", but rather unstable
+platform ones like "virtio0".
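+
+A hedged userspace sketch (not part of this patch) of what libdrm
+clients observe:
+
+    int fd = open("/dev/dri/card0", O_RDWR);
+    char *busid = drmGetBusid(fd);
+    /* with this fix: "pci:0000:00:02.0"; regressed: "virtio0" */
+    drmFreeBusid(busid);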
+
+Reinstate drm_virtio_set_busid() with judicious use of
+
+ git checkout -p a325725633c2^ -- drivers/gpu/drm/virtio
+
+Cc: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Emil Velikov <emil.l.velikov@gmail.com>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Joachim Frieben <jfrieben@hotmail.com>
+Reported-by: Joachim Frieben <jfrieben@hotmail.com>
+Fixes: a325725633c26aa66ab940f762a6b0778edf76c0
+Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1366842
+Signed-off-by: Laszlo Ersek <lersek@redhat.com>
+Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 10 ++++++++++
+ drivers/gpu/drm/virtio/virtgpu_drv.c | 1 +
+ drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
+ 3 files changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
++++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+@@ -27,6 +27,16 @@
+
+ #include "virtgpu_drv.h"
+
++int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
++{
++ struct pci_dev *pdev = dev->pdev;
++
++ if (pdev) {
++ return drm_pci_set_busid(dev, master);
++ }
++ return 0;
++}
++
+ static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
+ {
+ struct apertures_struct *ap;
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
+@@ -117,6 +117,7 @@ static const struct file_operations virt
+
+ static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
++ .set_busid = drm_virtio_set_busid,
+ .load = virtio_gpu_driver_load,
+ .unload = virtio_gpu_driver_unload,
+ .open = virtio_gpu_driver_open,
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
+@@ -49,6 +49,7 @@
+ #define DRIVER_PATCHLEVEL 1
+
+ /* virtgpu_drm_bus.c */
++int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
+ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
+
+ struct virtio_gpu_object {
--- /dev/null
+From cca32b7eeb4ea24fa6596650e06279ad9130af98 Mon Sep 17 00:00:00 2001
+From: Ross Zwisler <ross.zwisler@linux.intel.com>
+Date: Thu, 22 Sep 2016 11:49:38 -0400
+Subject: ext4: allow DAX writeback for hole punch
+
+From: Ross Zwisler <ross.zwisler@linux.intel.com>
+
+commit cca32b7eeb4ea24fa6596650e06279ad9130af98 upstream.
+
+Currently when doing a DAX hole punch with ext4 we fail to do a writeback.
+This is because the logic around filemap_write_and_wait_range() in
+ext4_punch_hole() only looks for dirty page cache pages in the radix tree,
+not for dirty DAX exceptional entries.
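+
+A hedged repro sketch (hypothetical, not from the original report) on a
+DAX-mounted ext4 file:
+
+    addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    memset(addr, 0xff, len); /* dirties DAX exceptional entries */
+    /* the dirty DAX entries must be written back before punching */
+    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, len);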
+
+Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3892,7 +3892,7 @@ int ext4_update_disksize_before_punch(st
+ }
+
+ /*
+- * ext4_punch_hole: punches a hole in a file by releaseing the blocks
++ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+ * @inode: File inode
+@@ -3921,7 +3921,7 @@ int ext4_punch_hole(struct inode *inode,
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+ */
+- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ ret = filemap_write_and_wait_range(mapping, offset,
+ offset + length - 1);
+ if (ret)
--- /dev/null
+From 4e800c0359d9a53e6bf0ab216954971b2515247f Mon Sep 17 00:00:00 2001
+From: wangguang <wang.guang55@zte.com.cn>
+Date: Thu, 15 Sep 2016 11:32:46 -0400
+Subject: ext4: bugfix for mmaped pages in mpage_release_unused_pages()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: wangguang <wang.guang55@zte.com.cn>
+
+commit 4e800c0359d9a53e6bf0ab216954971b2515247f upstream.
+
+Pages clear their buffers after ext4 delayed block allocation fails,
+but this does not clear the pte_dirty flag. If the pages are later
+unmapped, unmap_page_range() may, based on pte_dirty, try to call
+__set_page_dirty(), which can lead to the BUG_ON in
+mpage_prepare_extent_to_map() at "head = page_buffers(page);".
+
+This patch calls clear_page_dirty_for_io() in
+mpage_release_unused_pages() to clear pte_dirty for mmapped pages.
+
+Steps to reproduce the bug:
+
+(1) mmap a file in ext4
+ addr = (char *)mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
+ fd, 0);
+ memset(addr, 'i', 4096);
+
+(2) return EIO at
+
+ ext4_writepages->mpage_map_and_submit_extent->mpage_map_one_extent
+
+which causes this log message to be printed:
+
+ ext4_msg(sb, KERN_CRIT,
+ "Delayed block allocation failed for "
+ "inode %lu at logical offset %llu with"
+ " max blocks %u with error %d",
+ inode->i_ino,
+ (unsigned long long)map->m_lblk,
+ (unsigned)map->m_len, -err);
+
+(3) Unmap the addr, which causes a warning at
+
+ __set_page_dirty:WARN_ON_ONCE(warn && !PageUptodate(page));
+
+(4) Wait for a minute, then the BUG_ON happens.
+
+Signed-off-by: wangguang <wangguang03@zte.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1649,6 +1649,8 @@ static void mpage_release_unused_pages(s
+ BUG_ON(!PageLocked(page));
+ BUG_ON(PageWriteback(page));
+ if (invalidate) {
++ if (page_mapped(page))
++ clear_page_dirty_for_io(page);
+ block_invalidatepage(page, 0, PAGE_SIZE);
+ ClearPageUptodate(page);
+ }
--- /dev/null
+From 14fbd4aa613bd5110556c281799ce36dc6f3ba97 Mon Sep 17 00:00:00 2001
+From: Eric Whitney <enwlinux@gmail.com>
+Date: Mon, 29 Aug 2016 15:45:11 -0400
+Subject: ext4: enforce online defrag restriction for encrypted files
+
+From: Eric Whitney <enwlinux@gmail.com>
+
+commit 14fbd4aa613bd5110556c281799ce36dc6f3ba97 upstream.
+
+Online defragging of encrypted files is not currently implemented.
+However, the move extent ioctl can still return successfully when
+called. For example, this occurs when xfstest ext4/020 is run on an
+encrypted file system, resulting in a corrupted test file and a
+corresponding test failure.
+
+Until the proper functionality is implemented, fail the move extent
+ioctl if either the original or donor file is encrypted.
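+
+With this change, a caller of the ioctl (hypothetical sketch; the
+struct layout follows fs/ext4/ext4.h) sees the failure explicitly:
+
+    struct move_extent me = {
+        .donor_fd = donor_fd,
+        .orig_start = 0,
+        .donor_start = 0,
+        .len = len,
+    };
+
+    if (ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me) < 0 && errno == EOPNOTSUPP)
+        fprintf(stderr, "original or donor file is encrypted\n");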
+
+Signed-off-by: Eric Whitney <enwlinux@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/move_extent.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, s
+ return -EOPNOTSUPP;
+ }
+
++ if (ext4_encrypted_inode(orig_inode) ||
++ ext4_encrypted_inode(donor_inode)) {
++ ext4_msg(orig_inode->i_sb, KERN_ERR,
++ "Online defrag not supported for encrypted files");
++ return -EOPNOTSUPP;
++ }
++
+ /* Protect orig and donor inodes against a truncate */
+ lock_two_nondirectories(orig_inode, donor_inode);
+
--- /dev/null
+From edf15aa180d7b98fe16bd3eda42f9dd0e60dee20 Mon Sep 17 00:00:00 2001
+From: Fabian Frederick <fabf@skynet.be>
+Date: Thu, 15 Sep 2016 11:39:52 -0400
+Subject: ext4: fix memory leak in ext4_insert_range()
+
+From: Fabian Frederick <fabf@skynet.be>
+
+commit edf15aa180d7b98fe16bd3eda42f9dd0e60dee20 upstream.
+
+Running xfstests generic/013 with kmemleak gives the following:
+
+unreferenced object 0xffff8801d3d27de0 (size 96):
+ comm "fsstress", pid 4941, jiffies 4294860168 (age 53.485s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<ffffffff818eaaf3>] kmemleak_alloc+0x23/0x40
+ [<ffffffff81179805>] __kmalloc+0xf5/0x1d0
+ [<ffffffff8122ef5c>] ext4_find_extent+0x1ec/0x2f0
+ [<ffffffff8123530c>] ext4_insert_range+0x34c/0x4a0
+ [<ffffffff81235942>] ext4_fallocate+0x4e2/0x8b0
+ [<ffffffff81181334>] vfs_fallocate+0x134/0x210
+ [<ffffffff8118203f>] SyS_fallocate+0x3f/0x60
+ [<ffffffff818efa9b>] entry_SYSCALL_64_fastpath+0x13/0x8f
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The problem is addressed by dropping the references and freeing the
+path when there is no path[depth].p_ext.
+
+Signed-off-by: Fabian Frederick <fabf@skynet.be>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5734,6 +5734,9 @@ int ext4_insert_range(struct inode *inod
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto out_stop;
+ }
++ } else {
++ ext4_ext_drop_refs(path);
++ kfree(path);
+ }
+
+ ret = ext4_es_remove_extent(inode, offset_lblk,
--- /dev/null
+From dcce7a46c6f28f41447272fb44348ead8f584573 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 15 Sep 2016 13:13:13 -0400
+Subject: ext4: fix memory leak when symlink decryption fails
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit dcce7a46c6f28f41447272fb44348ead8f584573 upstream.
+
+This bug was introduced in v4.8-rc1.
+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/symlink.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/ext4/symlink.c
++++ b/fs/ext4/symlink.c
+@@ -65,13 +65,12 @@ static const char *ext4_encrypted_get_li
+ res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+ if (res)
+ goto errout;
++ paddr = pstr.name;
+
+ res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+ if (res < 0)
+ goto errout;
+
+- paddr = pstr.name;
+-
+ /* Null-terminate the name */
+ if (res <= pstr.len)
+ paddr[res] = '\0';
--- /dev/null
+From 93e3b4e6631d2a74a8cf7429138096862ff9f452 Mon Sep 17 00:00:00 2001
+From: Daeho Jeong <daeho.jeong@samsung.com>
+Date: Mon, 5 Sep 2016 22:56:10 -0400
+Subject: ext4: reinforce check of i_dtime when clearing high fields of uid and gid
+
+From: Daeho Jeong <daeho.jeong@samsung.com>
+
+commit 93e3b4e6631d2a74a8cf7429138096862ff9f452 upstream.
+
+Currently, ext4_do_update_inode() clears the high 16-bit fields of the
+uid/gid of a deleted and evicted inode to fix up interoperability with
+old kernels. However, it checks only the inode's i_dtime to determine
+whether the inode was deleted and evicted, which is risky because
+i_dtime is also used as the pointer maintaining the orphan inode list.
+We need to further check whether i_dtime is being used for the orphan
+inode list even when it is non-zero.
+
+We found that the high 16-bit fields of an inode's uid/gid are
+unintentionally and permanently cleared when inode truncation has been
+triggered but not yet finished, the inode metadata with the cleared
+high uid/gid bits has been written to disk, and a sudden power-off
+follows, in that order.
+
+Signed-off-by: Daeho Jeong <daeho.jeong@samsung.com>
+Signed-off-by: Hobin Woo <hobin.woo@samsung.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4814,14 +4814,14 @@ static int ext4_do_update_inode(handle_t
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+- if (!ei->i_dtime) {
++ if (ei->i_dtime && list_empty(&ei->i_orphan)) {
++ raw_inode->i_uid_high = 0;
++ raw_inode->i_gid_high = 0;
++ } else {
+ raw_inode->i_uid_high =
+ cpu_to_le16(high_16_bits(i_uid));
+ raw_inode->i_gid_high =
+ cpu_to_le16(high_16_bits(i_gid));
+- } else {
+- raw_inode->i_uid_high = 0;
+- raw_inode->i_gid_high = 0;
+ }
+ } else {
+ raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
--- /dev/null
+From e81d44778d1d57bbaef9e24c4eac7c8a7a401d40 Mon Sep 17 00:00:00 2001
+From: gmail <yngsion@gmail.com>
+Date: Fri, 30 Sep 2016 01:33:37 -0400
+Subject: ext4: release bh in make_indexed_dir
+
+From: gmail <yngsion@gmail.com>
+
+commit e81d44778d1d57bbaef9e24c4eac7c8a7a401d40 upstream.
+
+Commit 6050d47adcad ("ext4: bail out from make_indexed_dir() on
+first error") could end up leaking bh2 in the error path.
+
+[ Also avoid renaming bh2 to bh, which just confuses things --tytso ]
+
+Signed-off-by: yangsheng <yngsion@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *ha
+ frame->entries = entries;
+ frame->at = entries;
+ frame->bh = bh;
+- bh = bh2;
+
+ retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+ if (retval)
+ goto out_frames;
+- retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
++ retval = ext4_handle_dirty_dirent_node(handle, dir, bh2);
+ if (retval)
+ goto out_frames;
+
+- de = do_split(handle,dir, &bh, frame, &fname->hinfo);
++ de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
+ if (IS_ERR(de)) {
+ retval = PTR_ERR(de);
+ goto out_frames;
+ }
+- dx_release(frames);
+
+- retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
+- brelse(bh);
+- return retval;
++ retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
+ out_frames:
+ /*
+ * Even if the block split failed, we have to properly write
+ * out all the changes we did so far. Otherwise we can end up
+ * with corrupted filesystem.
+ */
+- ext4_mark_inode_dirty(handle, dir);
++ if (retval)
++ ext4_mark_inode_dirty(handle, dir);
+ dx_release(frames);
++ brelse(bh2);
+ return retval;
+ }
+
--- /dev/null
+From 9b623df614576680cadeaa4d7e0b5884de8f7c17 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 30 Sep 2016 02:02:29 -0400
+Subject: ext4: unmap metadata when zeroing blocks
+
+From: Jan Kara <jack@suse.cz>
+
+commit 9b623df614576680cadeaa4d7e0b5884de8f7c17 upstream.
+
+When zeroing blocks for DAX allocations, we also have to unmap aliases
+in the block device mappings. Otherwise writeback can overwrite zeros
+with stale data from block device page cache.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -647,11 +647,19 @@ found:
+ /*
+ * We have to zeroout blocks before inserting them into extent
+ * status tree. Otherwise someone could look them up there and
+- * use them before they are really zeroed.
++ * use them before they are really zeroed. We also have to
++ * unmap metadata before zeroing as otherwise writeback can
++ * overwrite zeros with stale data from block device.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO &&
+ map->m_flags & EXT4_MAP_MAPPED &&
+ map->m_flags & EXT4_MAP_NEW) {
++ ext4_lblk_t i;
++
++ for (i = 0; i < map->m_len; i++) {
++ unmap_underlying_metadata(inode->i_sb->s_bdev,
++ map->m_pblk + i);
++ }
+ ret = ext4_issue_zeroout(inode, map->m_lblk,
+ map->m_pblk, map->m_len);
+ if (ret) {
--- /dev/null
+From 5864a2fd3088db73d47942370d0f7210a807b9bc Mon Sep 17 00:00:00 2001
+From: Manfred Spraul <manfred@colorfullife.com>
+Date: Tue, 11 Oct 2016 13:54:50 -0700
+Subject: ipc/sem.c: fix complex_count vs. simple op race
+
+From: Manfred Spraul <manfred@colorfullife.com>
+
+commit 5864a2fd3088db73d47942370d0f7210a807b9bc upstream.
+
+Commit 6d07b68ce16a ("ipc/sem.c: optimize sem_lock()") introduced a
+race:
+
+sem_lock has a fast path that allows parallel simple operations.
+There are two reasons why a simple operation cannot run in parallel:
+ - a non-simple operation is ongoing (sma->sem_perm.lock held)
+ - a complex operation is sleeping (sma->complex_count != 0)
+
+As both facts are stored independently, a thread can bypass the current
+checks by sleeping in the right positions. See below for more details
+(or kernel bugzilla 105651).
+
+The patch fixes that by creating one variable (complex_mode)
+that tracks both reasons why parallel operations are not possible.
+
+The patch also updates stale documentation regarding the locking.
+
+With regards to stable kernels:
+The patch is required for all kernels that include the
+commit 6d07b68ce16a ("ipc/sem.c: optimize sem_lock()") (3.10?)
+
+The alternative is to revert the patch that introduced the race.
+
+The patch is safe for backporting, i.e. it makes no assumptions
+about memory barriers in spin_unlock_wait().
+
+Background:
+Here is the race of the current implementation:
+
+Thread A: (simple op)
+- does the first "sma->complex_count == 0" test
+
+Thread B: (complex op)
+- does sem_lock(): This includes an array scan. But the scan can't
+ find Thread A, because Thread A does not own sem->lock yet.
+- the thread does the operation, increases complex_count,
+ drops sem_lock, sleeps
+
+Thread A:
+- spin_lock(&sem->lock), spin_is_locked(sma->sem_perm.lock)
+- sleeps before the complex_count test
+
+Thread C: (complex op)
+- does sem_lock (no array scan, complex_count==1)
+- wakes up Thread B.
+- decrements complex_count
+
+Thread A:
+- does the complex_count test
+
+Bug:
+Now both thread A and thread C operate on the same array, without
+any synchronization.
+
+Fixes: 6d07b68ce16a ("ipc/sem.c: optimize sem_lock()")
+Link: http://lkml.kernel.org/r/1469123695-5661-1-git-send-email-manfred@colorfullife.com
+Reported-by: <felixh@informatik.uni-bremen.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: <1vier1@web.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sem.h | 1
+ ipc/sem.c | 138 +++++++++++++++++++++++++++++++---------------------
+ 2 files changed, 84 insertions(+), 55 deletions(-)
+
+--- a/include/linux/sem.h
++++ b/include/linux/sem.h
+@@ -21,6 +21,7 @@ struct sem_array {
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
++ bool complex_mode; /* no parallel simple ops */
+ };
+
+ #ifdef CONFIG_SYSVIPC
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -162,14 +162,21 @@ static int sysvipc_sem_proc_show(struct
+
+ /*
+ * Locking:
++ * a) global sem_lock() for read/write
+ * sem_undo.id_next,
+ * sem_array.complex_count,
+- * sem_array.pending{_alter,_cont},
+- * sem_array.sem_undo: global sem_lock() for read/write
+- * sem_undo.proc_next: only "current" is allowed to read/write that field.
++ * sem_array.complex_mode
++ * sem_array.pending{_alter,_const},
++ * sem_array.sem_undo
+ *
++ * b) global or semaphore sem_lock() for read/write:
+ * sem_array.sem_base[i].pending_{const,alter}:
+- * global or semaphore sem_lock() for read/write
++ * sem_array.complex_mode (for read)
++ *
++ * c) special:
++ * sem_undo_list.list_proc:
++ * * undo_list->lock for write
++ * * rcu for read
+ */
+
+ #define sc_semmsl sem_ctls[0]
+@@ -260,30 +267,61 @@ static void sem_rcu_free(struct rcu_head
+ }
+
+ /*
+- * Wait until all currently ongoing simple ops have completed.
++ * Enter the mode suitable for non-simple operations:
+ * Caller must own sem_perm.lock.
+- * New simple ops cannot start, because simple ops first check
+- * that sem_perm.lock is free.
+- * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+-static void sem_wait_array(struct sem_array *sma)
++static void complexmode_enter(struct sem_array *sma)
+ {
+ int i;
+ struct sem *sem;
+
+- if (sma->complex_count) {
+- /* The thread that increased sma->complex_count waited on
+- * all sem->lock locks. Thus we don't need to wait again.
+- */
++ if (sma->complex_mode) {
++ /* We are already in complex_mode. Nothing to do */
+ return;
+ }
+
++ /* We need a full barrier after seting complex_mode:
++ * The write to complex_mode must be visible
++ * before we read the first sem->lock spinlock state.
++ */
++ smp_store_mb(sma->complex_mode, true);
++
+ for (i = 0; i < sma->sem_nsems; i++) {
+ sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
++ /*
++ * spin_unlock_wait() is not a memory barriers, it is only a
++ * control barrier. The code must pair with spin_unlock(&sem->lock),
++ * thus just the control barrier is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++ smp_rmb();
++}
++
++/*
++ * Try to leave the mode that disallows simple operations:
++ * Caller must own sem_perm.lock.
++ */
++static void complexmode_tryleave(struct sem_array *sma)
++{
++ if (sma->complex_count) {
++ /* Complex ops are sleeping.
++ * We must stay in complex mode
++ */
++ return;
++ }
++ /*
++ * Immediately after setting complex_mode to false,
++ * a simple op can start. Thus: all memory writes
++ * performed by the current operation must be visible
++ * before we set complex_mode to false.
++ */
++ smp_store_release(&sma->complex_mode, false);
+ }
+
++#define SEM_GLOBAL_LOCK (-1)
+ /*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+@@ -300,56 +338,42 @@ static inline int sem_lock(struct sem_ar
+ /* Complex operation - acquire a full lock */
+ ipc_lock_object(&sma->sem_perm);
+
+- /* And wait until all simple ops that are processed
+- * right now have dropped their locks.
+- */
+- sem_wait_array(sma);
+- return -1;
++ /* Prevent parallel simple ops */
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+
+ /*
+ * Only one semaphore affected - try to optimize locking.
+- * The rules are:
+- * - optimized locking is possible if no complex operation
+- * is either enqueued or processed right now.
+- * - The test for enqueued complex ops is simple:
+- * sma->complex_count != 0
+- * - Testing for complex ops that are processed right now is
+- * a bit more difficult. Complex ops acquire the full lock
+- * and first wait that the running simple ops have completed.
+- * (see above)
+- * Thus: If we own a simple lock and the global lock is free
+- * and complex_count is now 0, then it will stay 0 and
+- * thus just locking sem->lock is sufficient.
++ * Optimized locking is possible if no complex operation
++ * is either enqueued or processed right now.
++ *
++ * Both facts are tracked by complex_mode.
+ */
+ sem = sma->sem_base + sops->sem_num;
+
+- if (sma->complex_count == 0) {
++ /*
++ * Initial check for complex_mode. Just an optimization,
++ * no locking, no memory barrier.
++ */
++ if (!sma->complex_mode) {
+ /*
+ * It appears that no complex operation is around.
+ * Acquire the per-semaphore lock.
+ */
+ spin_lock(&sem->lock);
+
+- /* Then check that the global lock is free */
+- if (!spin_is_locked(&sma->sem_perm.lock)) {
+- /*
+- * We need a memory barrier with acquire semantics,
+- * otherwise we can race with another thread that does:
+- * complex_count++;
+- * spin_unlock(sem_perm.lock);
+- */
+- smp_acquire__after_ctrl_dep();
++ /*
++ * See 51d7d5205d33
++ * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
++ * A full barrier is required: the write of sem->lock
++ * must be visible before the read is executed
++ */
++ smp_mb();
+
+- /*
+- * Now repeat the test of complex_count:
+- * It can't change anymore until we drop sem->lock.
+- * Thus: if is now 0, then it will stay 0.
+- */
+- if (sma->complex_count == 0) {
+- /* fast path successful! */
+- return sops->sem_num;
+- }
++ if (!smp_load_acquire(&sma->complex_mode)) {
++ /* fast path successful! */
++ return sops->sem_num;
+ }
+ spin_unlock(&sem->lock);
+ }
+@@ -369,15 +393,16 @@ static inline int sem_lock(struct sem_ar
+ /* Not a false alarm, thus complete the sequence for a
+ * full lock.
+ */
+- sem_wait_array(sma);
+- return -1;
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+ }
+
+ static inline void sem_unlock(struct sem_array *sma, int locknum)
+ {
+- if (locknum == -1) {
++ if (locknum == SEM_GLOBAL_LOCK) {
+ unmerge_queues(sma);
++ complexmode_tryleave(sma);
+ ipc_unlock_object(&sma->sem_perm);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+@@ -529,6 +554,7 @@ static int newary(struct ipc_namespace *
+ }
+
+ sma->complex_count = 0;
++ sma->complex_mode = true; /* dropped by sem_unlock below */
+ INIT_LIST_HEAD(&sma->pending_alter);
+ INIT_LIST_HEAD(&sma->pending_const);
+ INIT_LIST_HEAD(&sma->list_id);
+@@ -2184,10 +2210,10 @@ static int sysvipc_sem_proc_show(struct
+ /*
+ * The proc interface isn't aware of sem_lock(), it calls
+ * ipc_lock_object() directly (in sysvipc_find_ipc).
+- * In order to stay compatible with sem_lock(), we must wait until
+- * all simple semop() calls have left their critical regions.
++ * In order to stay compatible with sem_lock(), we must
++ * enter / leave complex_mode.
+ */
+- sem_wait_array(sma);
++ complexmode_enter(sma);
+
+ sem_otime = get_semotime(sma);
+
+@@ -2204,6 +2230,8 @@ static int sysvipc_sem_proc_show(struct
+ sem_otime,
+ sma->sem_ctime);
+
++ complexmode_tryleave(sma);
++
+ return 0;
+ }
+ #endif
--- /dev/null
+From e03a9976afce6634826d56c33531dd10bb9a9166 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 22 Sep 2016 11:44:06 -0400
+Subject: jbd2: fix lockdep annotation in add_transaction_credits()
+
+From: Jan Kara <jack@suse.cz>
+
+commit e03a9976afce6634826d56c33531dd10bb9a9166 upstream.
+
+Thomas has reported a lockdep splat triggered in
+add_transaction_credits(). The problem is that the function calls
+jbd2_might_wait_for_commit() while holding j_state_lock, which is wrong
+(we never actually wait for transaction commit while holding that lock).
+
+Fix the problem by moving jbd2_might_wait_for_commit() into places where
+we are ready to wait for transaction commit and thus j_state_lock is
+unlocked.
+
+Fixes: 1eaa566d368b214d99cbb973647c1b0b8102a9ae
+Reported-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/transaction.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -159,6 +159,7 @@ static void wait_transaction_locked(jour
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
+ jbd2_log_start_commit(journal, tid);
++ jbd2_might_wait_for_commit(journal);
+ schedule();
+ finish_wait(&journal->j_wait_transaction_locked, &wait);
+ }
+@@ -182,8 +183,6 @@ static int add_transaction_credits(journ
+ int needed;
+ int total = blocks + rsv_blocks;
+
+- jbd2_might_wait_for_commit(journal);
+-
+ /*
+ * If the current transaction is locked down for commit, wait
+ * for the lock to be released.
+@@ -214,6 +213,7 @@ static int add_transaction_credits(journ
+ if (atomic_read(&journal->j_reserved_credits) + total >
+ journal->j_max_transaction_buffers) {
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + total <=
+ journal->j_max_transaction_buffers);
+@@ -238,6 +238,7 @@ static int add_transaction_credits(journ
+ if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+ atomic_sub(total, &t->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ write_lock(&journal->j_state_lock);
+ if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+ __jbd2_log_wait_for_space(journal);
+@@ -255,6 +256,7 @@ static int add_transaction_credits(journ
+ sub_reserved_credits(journal, rsv_blocks);
+ atomic_sub(total, &t->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
++ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + rsv_blocks
+ <= journal->j_max_transaction_buffers / 2);
--- /dev/null
+From 2247bb335ab9c40058484cac36ea74ee652f3b7b Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Fri, 7 Oct 2016 17:01:07 -0700
+Subject: mm/hugetlb: fix memory offline with hugepage size > memory block size
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 2247bb335ab9c40058484cac36ea74ee652f3b7b upstream.
+
+Patch series "mm/hugetlb: memory offline issues with hugepages", v4.
+
+This addresses several issues with hugepages and memory offline. While
+the first patch fixes a panic, and is therefore rather important, the
+last patch is just a performance optimization.
+
+The second patch fixes a theoretical issue with reserved hugepages,
+while still leaving an ugly usability issue; see the description.
+
+This patch (of 3):
+
+dissolve_free_huge_pages() will either run into the VM_BUG_ON() or a
+list corruption and addressing exception when trying to set a memory
+block offline that is part (but not the first part) of a "gigantic"
+hugetlb page with a size > memory block size.
+
+When no other smaller hugetlb page sizes are present, the VM_BUG_ON()
+will trigger directly. In the other case we will run into an addressing
+exception later, because dissolve_free_huge_page() will not work on the
+head page of the compound hugetlb page which will result in a NULL
+hstate from page_hstate().
+
+To fix this, first remove the VM_BUG_ON() because it is wrong, and then
+use the compound head page in dissolve_free_huge_page(). This means
+that an unused pre-allocated gigantic page that has any part of itself
+inside the memory block that is going offline will be dissolved
+completely. Losing an unused gigantic hugepage is preferable to failing
+the memory offline, for example in the situation where a (possibly
+faulty) memory DIMM needs to go offline.
+
+Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
+Link: http://lkml.kernel.org/r/20160926172811.94033-2-gerald.schaefer@de.ibm.com
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1443,13 +1443,14 @@ static void dissolve_free_huge_page(stru
+ {
+ spin_lock(&hugetlb_lock);
+ if (PageHuge(page) && !page_count(page)) {
+- struct hstate *h = page_hstate(page);
+- int nid = page_to_nid(page);
+- list_del(&page->lru);
++ struct page *head = compound_head(page);
++ struct hstate *h = page_hstate(head);
++ int nid = page_to_nid(head);
++ list_del(&head->lru);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[nid]--;
+ h->max_huge_pages--;
+- update_and_free_page(h, page);
++ update_and_free_page(h, head);
+ }
+ spin_unlock(&hugetlb_lock);
+ }
+@@ -1457,7 +1458,8 @@ static void dissolve_free_huge_page(stru
+ /*
+ * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
+ * make specified memory blocks removable from the system.
+- * Note that start_pfn should aligned with (minimum) hugepage size.
++ * Note that this will dissolve a free gigantic hugepage completely, if any
++ * part of it lies within the given range.
+ */
+ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+@@ -1466,7 +1468,6 @@ void dissolve_free_huge_pages(unsigned l
+ if (!hugepages_supported())
+ return;
+
+- VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
+ dissolve_free_huge_page(pfn_to_page(pfn));
+ }
--- /dev/null
+From 7bc2b55a5c030685b399bb65b6baa9ccc3d1f167 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 15 Sep 2016 16:44:56 +0300
+Subject: scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 7bc2b55a5c030685b399bb65b6baa9ccc3d1f167 upstream.
+
+We need to put an upper bound on "user_len" so the memcpy() doesn't
+overflow.
+
+Reported-by: Marco Grassi <marco.gra@gmail.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/arcmsr/arcmsr_hba.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2388,7 +2388,8 @@ static int arcmsr_iop_message_xfer(struc
+ }
+ case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ unsigned char *ver_addr;
+- int32_t user_len, cnt2end;
++ uint32_t user_len;
++ int32_t cnt2end;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ if (!ver_addr) {
+@@ -2397,6 +2398,11 @@ static int arcmsr_iop_message_xfer(struc
+ }
+ ptmpuserbuffer = ver_addr;
+ user_len = pcmdmessagefld->cmdmessage.Length;
++ if (user_len > ARCMSR_API_DATA_BUFLEN) {
++ retvalue = ARCMSR_MESSAGE_FAIL;
++ kfree(ver_addr);
++ goto message_out;
++ }
+ memcpy(ptmpuserbuffer,
+ pcmdmessagefld->messagedatabuffer, user_len);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
--- /dev/null
+From 4bd173c30792791a6daca8c64793ec0a4ae8324f Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@alien8.de>
+Date: Fri, 23 Sep 2016 13:22:26 +0200
+Subject: scsi: arcmsr: Simplify user_len checking
+
+From: Borislav Petkov <bp@alien8.de>
+
+commit 4bd173c30792791a6daca8c64793ec0a4ae8324f upstream.
+
+Do the user_len check first and then the ver_addr allocation, so that
+we avoid the kfree() on the error path when user_len is greater than
+ARCMSR_API_DATA_BUFLEN.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Marco Grassi <marco.gra@gmail.com>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Tomas Henzl <thenzl@redhat.com>
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Tomas Henzl <thenzl@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/arcmsr/arcmsr_hba.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2391,18 +2391,20 @@ static int arcmsr_iop_message_xfer(struc
+ uint32_t user_len;
+ int32_t cnt2end;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+- ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+- if (!ver_addr) {
++
++ user_len = pcmdmessagefld->cmdmessage.Length;
++ if (user_len > ARCMSR_API_DATA_BUFLEN) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
+ }
+- ptmpuserbuffer = ver_addr;
+- user_len = pcmdmessagefld->cmdmessage.Length;
+- if (user_len > ARCMSR_API_DATA_BUFLEN) {
++
++ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
++ if (!ver_addr) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+- kfree(ver_addr);
+ goto message_out;
+ }
++ ptmpuserbuffer = ver_addr;
++
+ memcpy(ptmpuserbuffer,
+ pcmdmessagefld->messagedatabuffer, user_len);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
--- /dev/null
+From 07d0e9a847401ffd2f09bd450d41644cd090e81d Mon Sep 17 00:00:00 2001
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Mon, 19 Sep 2016 08:59:19 -0500
+Subject: scsi: ibmvfc: Fix I/O hang when port is not mapped
+
+From: Brian King <brking@linux.vnet.ibm.com>
+
+commit 07d0e9a847401ffd2f09bd450d41644cd090e81d upstream.
+
+If a VFC port gets unmapped in the VIOS, it may not respond with a CRQ
+init complete following H_REG_CRQ. If this occurs, we can end up having
+called scsi_block_requests() with no corresponding unblock until the
+init complete happens, which may never occur, leaving I/O requests
+hung. This patch ensures the host action stays set to
+IBMVFC_HOST_ACTION_TGT_DEL so that we move all rports into devloss
+state and unblock unless we receive an init complete.
+
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Acked-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ibmvscsi/ibmvfc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvf
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vhost->state = IBMVFC_NO_CRQ;
+ vhost->logged_in = 0;
+- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+ /* Clean out the queue */
+ memset(crq->msgs, 0, PAGE_SIZE);
reiserfs-unlock-superblock-before-calling-reiserfs_quota_on_mount.patch
async_pq_val-fix-dma-memory-leak.patch
autofs-fix-automounts-by-using-current_real_cred-uid.patch
+scsi-arcmsr-buffer-overflow-in-arcmsr_iop_message_xfer.patch
+scsi-arcmsr-simplify-user_len-checking.patch
+scsi-ibmvfc-fix-i-o-hang-when-port-is-not-mapped.patch
+ipc-sem.c-fix-complex_count-vs.-simple-op-race.patch
+mm-hugetlb-fix-memory-offline-with-hugepage-size-memory-block-size.patch
+vfs-mm-fix-a-dead-loop-in-truncate_inode_pages_range.patch
+jbd2-fix-lockdep-annotation-in-add_transaction_credits.patch
+ext4-enforce-online-defrag-restriction-for-encrypted-files.patch
+ext4-reinforce-check-of-i_dtime-when-clearing-high-fields-of-uid-and-gid.patch
+ext4-bugfix-for-mmaped-pages-in-mpage_release_unused_pages.patch
+ext4-fix-memory-leak-in-ext4_insert_range.patch
+ext4-fix-memory-leak-when-symlink-decryption-fails.patch
+ext4-allow-dax-writeback-for-hole-punch.patch
+ext4-release-bh-in-make_indexed_dir.patch
+ext4-unmap-metadata-when-zeroing-blocks.patch
+crypto-ghash-generic-move-common-definitions-to-a-new-header-file.patch
+crypto-vmx-fix-memory-corruption-caused-by-p8_ghash.patch
+dlm-free-workqueues-after-the-connections.patch
+vfs-move-permission-checking-into-notify_change-for-utimes-null.patch
+cachefiles-fix-attempt-to-read-i_blocks-after-deleting-file.patch
+drm-virtio-reinstate-drm_virtio_set_busid.patch
+acpi-nfit-check-for-the-correct-event-code-in-notifications.patch
--- /dev/null
+From c2a9737f45e27d8263ff9643f994bda9bac0b944 Mon Sep 17 00:00:00 2001
+From: Wei Fang <fangwei1@huawei.com>
+Date: Fri, 7 Oct 2016 17:01:52 -0700
+Subject: vfs,mm: fix a dead loop in truncate_inode_pages_range()
+
+From: Wei Fang <fangwei1@huawei.com>
+
+commit c2a9737f45e27d8263ff9643f994bda9bac0b944 upstream.
+
+We triggered a dead loop in truncate_inode_pages_range() on a 32-bit
+architecture with the test case below:
+
+ ...
+ fd = open();
+ write(fd, buf, 4096);
+ preadv64(fd, &iovec, 1, 0xffffffff000);
+ ftruncate(fd, 0);
+ ...
+
+Then ftruncate() will not return forever.
+
+The filesystem used in this case is ubifs, but it can be triggered on
+many other filesystems.
+
+When preadv64() is called with offset=0xffffffff000, a page with
+index=0xffffffff will be added to the radix tree of ->mapping. Then
+this page can be found in ->mapping with pagevec_lookup(). After that,
+truncate_inode_pages_range(), which is called in ftruncate(), will fall
+into an infinite loop:
+
+ - find a page with index=0xffffffff, since index>=end, this page won't
+ be truncated
+
+ - index++, and index become 0
+
+ - the page with index=0xffffffff will be found again
+
+The data type of index is unsigned long, so index won't overflow to 0
+on a 64-bit architecture in this case, and the dead loop won't happen.
+
+Since truncate_inode_pages_range() is executed with holding lock of
+inode->i_rwsem, any operation related with this lock will be blocked,
+and a hung task will happen, e.g.:
+
+ INFO: task truncate_test:3364 blocked for more than 120 seconds.
+ ...
+ call_rwsem_down_write_failed+0x17/0x30
+ generic_file_write_iter+0x32/0x1c0
+ ubifs_write_iter+0xcc/0x170
+ __vfs_write+0xc4/0x120
+ vfs_write+0xb2/0x1b0
+ SyS_write+0x46/0xa0
+
+The page with index=0xffffffff added to ->mapping is useless. Fix this
+by checking the read position before allocating pages.
+
+Link: http://lkml.kernel.org/r/1475151010-40166-1-git-send-email-fangwei1@huawei.com
+Signed-off-by: Wei Fang <fangwei1@huawei.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/filemap.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1687,6 +1687,10 @@ static ssize_t do_generic_file_read(stru
+ unsigned int prev_offset;
+ int error = 0;
+
++ if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
++ return -EINVAL;
++ iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
++
+ index = *ppos >> PAGE_SHIFT;
+ prev_index = ra->prev_pos >> PAGE_SHIFT;
+ prev_offset = ra->prev_pos & (PAGE_SIZE-1);
--- /dev/null
+From f2b20f6ee842313a0d681dbbf7f87b70291a6a3b Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Fri, 16 Sep 2016 12:44:20 +0200
+Subject: vfs: move permission checking into notify_change() for utimes(NULL)
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit f2b20f6ee842313a0d681dbbf7f87b70291a6a3b upstream.
+
+This fixes a bug where the permission was not properly checked in
+overlayfs. The testcase is ltp/utimensat01.
+
+It is also cleaner and safer to do the permission checking in the vfs
+helper instead of the caller.
+
+This patch introduces an additional ia_valid flag ATTR_TOUCH (since
+touch(1) is the most obvious user of utimes(NULL)) that is passed into
+notify_change whenever the conditions for this special permission checking
+mode are met.
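+
+For illustration (hedged sketch, not part of this patch), the affected
+call pattern is a plain "touch":
+
+    /* times == NULL: set both timestamps to the current time;
+     * notify_change() now sees ATTR_TOUCH and itself checks for
+     * ownership, CAP_FOWNER, or write permission */
+    utimensat(AT_FDCWD, "file", NULL, 0);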
+
+Reported-by: Aihua Zhang <zhangaihua1@huawei.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Tested-by: Aihua Zhang <zhangaihua1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/attr.c | 15 +++++++++++++++
+ fs/utimes.c | 17 +----------------
+ include/linux/fs.h | 1 +
+ 3 files changed, 17 insertions(+), 16 deletions(-)
+
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -202,6 +202,21 @@ int notify_change(struct dentry * dentry
+ return -EPERM;
+ }
+
++ /*
++ * If utimes(2) and friends are called with times == NULL (or both
++ * times are UTIME_NOW), then we need to check for write permission
++ */
++ if (ia_valid & ATTR_TOUCH) {
++ if (IS_IMMUTABLE(inode))
++ return -EPERM;
++
++ if (!inode_owner_or_capable(inode)) {
++ error = inode_permission(inode, MAY_WRITE);
++ if (error)
++ return error;
++ }
++ }
++
+ if ((ia_valid & ATTR_MODE)) {
+ umode_t amode = attr->ia_mode;
+ /* Flag setting protected by i_mutex */
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -87,21 +87,7 @@ static int utimes_common(struct path *pa
+ */
+ newattrs.ia_valid |= ATTR_TIMES_SET;
+ } else {
+- /*
+- * If times is NULL (or both times are UTIME_NOW),
+- * then we need to check permissions, because
+- * inode_change_ok() won't do it.
+- */
+- error = -EPERM;
+- if (IS_IMMUTABLE(inode))
+- goto mnt_drop_write_and_out;
+-
+- error = -EACCES;
+- if (!inode_owner_or_capable(inode)) {
+- error = inode_permission(inode, MAY_WRITE);
+- if (error)
+- goto mnt_drop_write_and_out;
+- }
++ newattrs.ia_valid |= ATTR_TOUCH;
+ }
+ retry_deleg:
+ inode_lock(inode);
+@@ -113,7 +99,6 @@ retry_deleg:
+ goto retry_deleg;
+ }
+
+-mnt_drop_write_and_out:
+ mnt_drop_write(path->mnt);
+ out:
+ return error;
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb
+ #define ATTR_KILL_PRIV (1 << 14)
+ #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+ #define ATTR_TIMES_SET (1 << 16)
++#define ATTR_TOUCH (1 << 17)
+
+ /*
+ * Whiteout is represented by a char device. The following constants define the