--- /dev/null
+From e17b1af96b2afc38e684aa2f1033387e2ed10029 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Fri, 12 Apr 2019 22:34:18 +0100
+Subject: ARM: 8857/1: efi: enable CP15 DMB instructions before cleaning the cache
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit e17b1af96b2afc38e684aa2f1033387e2ed10029 upstream.
+
+The EFI stub is entered with the caches and MMU enabled by the
+firmware, and once the stub is ready to hand over to the decompressor,
+we clean and disable the caches.
+
+The cache clean routines use CP15 barrier instructions, which can be
+disabled via SCTLR. Normally, when using the provided cache handling
+routines to enable the caches and MMU, this bit is enabled as well.
+However, since we entered the stub with the caches already enabled,
+this routine is not executed before we call the cache clean routines,
+resulting in undefined instruction exceptions if the firmware never
+enabled this bit.
+
+So set the bit explicitly in the EFI entry code, but do so in a way that
+guarantees that the resulting code can still run on v6 cores as well
+(which are guaranteed to have CP15 barriers enabled).
+
+Cc: <stable@vger.kernel.org> # v4.9+
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/compressed/head.S | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
+
+ @ Preserve return value of efi_entry() in r4
+ mov r4, r0
+- bl cache_clean_flush
++
++ @ our cache maintenance code relies on CP15 barrier instructions
++ @ but since we arrived here with the MMU and caches configured
++ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
++ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
++ @ the enable path will be executed on v7+ only.
++ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
++ tst r1, #(1 << 5) @ CP15BEN bit set?
++ bne 0f
++ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
++ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
++ ARM( .inst 0xf57ff06f @ v7+ isb )
++ THUMB( isb )
++
++0: bl cache_clean_flush
+ bl cache_off
+
+ @ Set parameters for booting zImage according to boot protocol
--- /dev/null
+From 76a495d666e5043ffc315695f8241f5e94a98849 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Wed, 17 Apr 2019 12:58:28 -0400
+Subject: ceph: ensure d_name stability in ceph_dentry_hash()
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 76a495d666e5043ffc315695f8241f5e94a98849 upstream.
+
+Take the d_lock here to ensure that d_name doesn't change.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/dir.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *
+ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
+ {
+ struct ceph_inode_info *dci = ceph_inode(dir);
++ unsigned hash;
+
+ switch (dci->i_dir_layout.dl_dir_hash) {
+ case 0: /* for backward compat */
+@@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *
+ return dn->d_name.hash;
+
+ default:
+- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
++ spin_lock(&dn->d_lock);
++ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+ dn->d_name.name, dn->d_name.len);
++ spin_unlock(&dn->d_lock);
++ return hash;
+ }
+ }
+
--- /dev/null
+From 37659182bff1eeaaeadcfc8f853c6d2b6dbc3f47 Mon Sep 17 00:00:00 2001
+From: "Yan, Zheng" <zyan@redhat.com>
+Date: Thu, 18 Apr 2019 11:24:57 +0800
+Subject: ceph: fix ci->i_head_snapc leak
+
+From: Yan, Zheng <zyan@redhat.com>
+
+commit 37659182bff1eeaaeadcfc8f853c6d2b6dbc3f47 upstream.
+
+We missed two places where i_wrbuffer_ref_head, i_wr_ref, i_dirty_caps
+and i_flushing_caps may change. When they are all zeros, we should free
+i_head_snapc.
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/38224
+Reported-and-tested-by: Luis Henriques <lhenriques@suse.com>
+Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/mds_client.c | 9 +++++++++
+ fs/ceph/snap.c | 7 ++++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1286,6 +1286,15 @@ static int remove_session_caps_cb(struct
+ list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
+ ci->i_prealloc_cap_flush = NULL;
+ }
++
++ if (drop &&
++ ci->i_wrbuffer_ref_head == 0 &&
++ ci->i_wr_ref == 0 &&
++ ci->i_dirty_caps == 0 &&
++ ci->i_flushing_caps == 0) {
++ ceph_put_snap_context(ci->i_head_snapc);
++ ci->i_head_snapc = NULL;
++ }
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ while (!list_empty(&to_remove)) {
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_ino
+ old_snapc = NULL;
+
+ update_snapc:
+- if (ci->i_head_snapc) {
++ if (ci->i_wrbuffer_ref_head == 0 &&
++ ci->i_wr_ref == 0 &&
++ ci->i_dirty_caps == 0 &&
++ ci->i_flushing_caps == 0) {
++ ci->i_head_snapc = NULL;
++ } else {
+ ci->i_head_snapc = ceph_get_snap_context(new_snapc);
+ dout(" new snapc is %p\n", new_snapc);
+ }
--- /dev/null
+From 1bcb344086f3ecf8d6705f6d708441baa823beb3 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Mon, 15 Apr 2019 12:00:42 -0400
+Subject: ceph: only use d_name directly when parent is locked
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 1bcb344086f3ecf8d6705f6d708441baa823beb3 upstream.
+
+Ben reported tripping the BUG_ON in create_request_message during some
+performance testing. Analysis of the vmcore showed that the length of
+the r_dentry->d_name string changed after we allocated the buffer, but
+before we encoded it.
+
+build_dentry_path returns pointers to d_name in the common case of
+non-snapped dentries, but this optimization isn't safe unless the parent
+directory is locked. When it isn't, have the code make a copy of the
+d_name while holding the d_lock.
+
+Cc: stable@vger.kernel.org
+Reported-by: Ben England <bengland@redhat.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/mds_client.c | 61 +++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 50 insertions(+), 11 deletions(-)
+
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1958,10 +1958,39 @@ retry:
+ return path;
+ }
+
++/* Duplicate the dentry->d_name.name safely */
++static int clone_dentry_name(struct dentry *dentry, const char **ppath,
++ int *ppathlen)
++{
++ u32 len;
++ char *name;
++
++retry:
++ len = READ_ONCE(dentry->d_name.len);
++ name = kmalloc(len + 1, GFP_NOFS);
++ if (!name)
++ return -ENOMEM;
++
++ spin_lock(&dentry->d_lock);
++ if (dentry->d_name.len != len) {
++ spin_unlock(&dentry->d_lock);
++ kfree(name);
++ goto retry;
++ }
++ memcpy(name, dentry->d_name.name, len);
++ spin_unlock(&dentry->d_lock);
++
++ name[len] = '\0';
++ *ppath = name;
++ *ppathlen = len;
++ return 0;
++}
++
+ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ const char **ppath, int *ppathlen, u64 *pino,
+- int *pfreepath)
++ bool *pfreepath, bool parent_locked)
+ {
++ int ret;
+ char *path;
+
+ rcu_read_lock();
+@@ -1970,8 +1999,15 @@ static int build_dentry_path(struct dent
+ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dir);
+ rcu_read_unlock();
+- *ppath = dentry->d_name.name;
+- *ppathlen = dentry->d_name.len;
++ if (parent_locked) {
++ *ppath = dentry->d_name.name;
++ *ppathlen = dentry->d_name.len;
++ } else {
++ ret = clone_dentry_name(dentry, ppath, ppathlen);
++ if (ret)
++ return ret;
++ *pfreepath = true;
++ }
+ return 0;
+ }
+ rcu_read_unlock();
+@@ -1979,13 +2015,13 @@ static int build_dentry_path(struct dent
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+- *pfreepath = 1;
++ *pfreepath = true;
+ return 0;
+ }
+
+ static int build_inode_path(struct inode *inode,
+ const char **ppath, int *ppathlen, u64 *pino,
+- int *pfreepath)
++ bool *pfreepath)
+ {
+ struct dentry *dentry;
+ char *path;
+@@ -2001,7 +2037,7 @@ static int build_inode_path(struct inode
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+- *pfreepath = 1;
++ *pfreepath = true;
+ return 0;
+ }
+
+@@ -2012,7 +2048,7 @@ static int build_inode_path(struct inode
+ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ struct inode *rdiri, const char *rpath,
+ u64 rino, const char **ppath, int *pathlen,
+- u64 *ino, int *freepath)
++ u64 *ino, bool *freepath, bool parent_locked)
+ {
+ int r = 0;
+
+@@ -2022,7 +2058,7 @@ static int set_request_path_attr(struct
+ ceph_snap(rinode));
+ } else if (rdentry) {
+ r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
+- freepath);
++ freepath, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ *ppath);
+ } else if (rpath || rino) {
+@@ -2048,7 +2084,7 @@ static struct ceph_msg *create_request_m
+ const char *path2 = NULL;
+ u64 ino1 = 0, ino2 = 0;
+ int pathlen1 = 0, pathlen2 = 0;
+- int freepath1 = 0, freepath2 = 0;
++ bool freepath1 = false, freepath2 = false;
+ int len;
+ u16 releases;
+ void *p, *end;
+@@ -2056,16 +2092,19 @@ static struct ceph_msg *create_request_m
+
+ ret = set_request_path_attr(req->r_inode, req->r_dentry,
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+- &path1, &pathlen1, &ino1, &freepath1);
++ &path1, &pathlen1, &ino1, &freepath1,
++ test_bit(CEPH_MDS_R_PARENT_LOCKED,
++ &req->r_req_flags));
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out;
+ }
+
++ /* If r_old_dentry is set, then assume that its parent is locked */
+ ret = set_request_path_attr(NULL, req->r_old_dentry,
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+- &path2, &pathlen2, &ino2, &freepath2);
++ &path2, &pathlen2, &ino2, &freepath2, true);
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out_free1;
--- /dev/null
+From 5bb5c3a3ac102158b799bf5eda871223aa5e9c25 Mon Sep 17 00:00:00 2001
+From: Shun-Chih Yu <shun-chih.yu@mediatek.com>
+Date: Thu, 25 Apr 2019 11:53:50 +0800
+Subject: dmaengine: mediatek-cqdma: fix wrong register usage in mtk_cqdma_start
+
+From: Shun-Chih Yu <shun-chih.yu@mediatek.com>
+
+commit 5bb5c3a3ac102158b799bf5eda871223aa5e9c25 upstream.
+
+This patch fixes a wrong register usage in mtk_cqdma_start(): the
+destination register should be MTK_CQDMA_DST2 instead of MTK_CQDMA_SRC2.
+
+Fixes: b1f01e48df5a ("dmaengine: mediatek: Add MediaTek Command-Queue DMA controller for MT6765 SoC")
+Signed-off-by: Shun-Chih Yu <shun-chih.yu@mediatek.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mediatek/mtk-cqdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma/mediatek/mtk-cqdma.c
++++ b/drivers/dma/mediatek/mtk-cqdma.c
+@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_c
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
+ #else
+- mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
++ mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
+ #endif
+
+ /* setup the length */
--- /dev/null
+From 6e7da74775348d96e2d7efaf3f91410e18c481ef Mon Sep 17 00:00:00 2001
+From: Achim Dahlhoff <Achim.Dahlhoff@de.bosch.com>
+Date: Fri, 12 Apr 2019 07:29:14 +0200
+Subject: dmaengine: sh: rcar-dmac: Fix glitch in dmaengine_tx_status
+
+From: Achim Dahlhoff <Achim.Dahlhoff@de.bosch.com>
+
+commit 6e7da74775348d96e2d7efaf3f91410e18c481ef upstream.
+
+The tx_status poll in the rcar_dmac driver reads the status register
+which indicates which chunk is busy (DMACHCRB). Afterwards the position
+inside the chunk is read from DMATCRB. It is possible that the chunk
+has changed between the two reads. The result is a non-monotonic
+increase of the residue. Fix this by introducing a 'safe read' logic.
+
+Fixes: 73a47bd0da66 ("dmaengine: rcar-dmac: use TCRB instead of TCR for residue")
+Signed-off-by: Achim Dahlhoff <Achim.Dahlhoff@de.bosch.com>
+Signed-off-by: Dirk Behme <dirk.behme@de.bosch.com>
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Cc: <stable@vger.kernel.org> # v4.16+
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/sh/rcar-dmac.c | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_r
+ enum dma_status status;
+ unsigned int residue = 0;
+ unsigned int dptr = 0;
++ unsigned int chcrb;
++ unsigned int tcrb;
++ unsigned int i;
+
+ if (!desc)
+ return 0;
+@@ -1330,14 +1333,31 @@ static unsigned int rcar_dmac_chan_get_r
+ }
+
+ /*
++ * We need to read two registers.
++ * Make sure the control register does not skip to next chunk
++ * while reading the counter.
++ * Trying it 3 times should be enough: Initial read, retry, retry
++ * for the paranoid.
++ */
++ for (i = 0; i < 3; i++) {
++ chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
++ RCAR_DMACHCRB_DPTR_MASK;
++ tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
++ /* Still the same? */
++ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
++ RCAR_DMACHCRB_DPTR_MASK))
++ break;
++ }
++ WARN_ONCE(i >= 3, "residue might be not continuous!");
++
++ /*
+ * In descriptor mode the descriptor running pointer is not maintained
+ * by the interrupt handler, find the running descriptor from the
+ * descriptor pointer field in the CHCRB register. In non-descriptor
+ * mode just use the running descriptor pointer.
+ */
+ if (desc->hwdescs.use) {
+- dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+- RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
++ dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
+ if (dptr == 0)
+ dptr = desc->nchunks;
+ dptr--;
+@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_r
+ }
+
+ /* Add the residue for the current chunk. */
+- residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
++ residue += tcrb << desc->xfer_shift;
+
+ return residue;
+ }
--- /dev/null
+From 907bd68a2edc491849e2fdcfe52c4596627bca94 Mon Sep 17 00:00:00 2001
+From: Dirk Behme <dirk.behme@de.bosch.com>
+Date: Fri, 12 Apr 2019 07:29:13 +0200
+Subject: dmaengine: sh: rcar-dmac: With cyclic DMA residue 0 is valid
+
+From: Dirk Behme <dirk.behme@de.bosch.com>
+
+commit 907bd68a2edc491849e2fdcfe52c4596627bca94 upstream.
+
+With cyclic DMA, a residue of 0 is not an indication of a completed
+DMA. In the cyclic case, make sure that dma_set_residue() is still
+called so that a residue of 0 is forwarded correctly to the caller.
+
+Fixes: 3544d2878817 ("dmaengine: rcar-dmac: use result of updated get_residue in tx_status")
+Signed-off-by: Dirk Behme <dirk.behme@de.bosch.com>
+Signed-off-by: Achim Dahlhoff <Achim.Dahlhoff@de.bosch.com>
+Signed-off-by: Hiroyuki Yokoyama <hiroyuki.yokoyama.vx@renesas.com>
+Signed-off-by: Yao Lihua <ylhuajnu@outlook.com>
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: <stable@vger.kernel.org> # v4.8+
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/sh/rcar-dmac.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1368,6 +1368,7 @@ static enum dma_status rcar_dmac_tx_stat
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int residue;
++ bool cyclic;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE || !txstate)
+@@ -1375,10 +1376,11 @@ static enum dma_status rcar_dmac_tx_stat
+
+ spin_lock_irqsave(&rchan->lock, flags);
+ residue = rcar_dmac_chan_get_residue(rchan, cookie);
++ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
+ spin_unlock_irqrestore(&rchan->lock, flags);
+
+ /* if there's no residue, the cookie is complete */
+- if (!residue)
++ if (!residue && !cyclic)
+ return DMA_COMPLETE;
+
+ dma_set_residue(txstate, residue);
--- /dev/null
+From bd4264112f93045704731850c5e4d85db981cd85 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 16 Apr 2019 11:49:17 +0200
+Subject: drm/ttm: fix re-init of global structures
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+commit bd4264112f93045704731850c5e4d85db981cd85 upstream.
+
+When a driver unloads without unloading TTM, we don't correctly
+clear the global structures, leading to errors on re-init.
+
+The next step should probably be to remove the global structures and
+kobjs altogether, but this is tricky since we need to maintain
+backward compatibility.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Karol Herbst <kherbst@redhat.com>
+Tested-by: Karol Herbst <kherbst@redhat.com>
+CC: stable@vger.kernel.org # 5.0.x
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/ttm/ttm_bo.c | 10 +++++-----
+ drivers/gpu/drm/ttm/ttm_memory.c | 5 +++--
+ include/drm/ttm/ttm_bo_driver.h | 1 -
+ 3 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(s
+ * ttm_global_mutex - protecting the global BO state
+ */
+ DEFINE_MUTEX(ttm_global_mutex);
+-struct ttm_bo_global ttm_bo_glob = {
+- .use_count = 0
+-};
++unsigned ttm_bo_glob_use_count;
++struct ttm_bo_global ttm_bo_glob;
+
+ static struct attribute ttm_bo_count = {
+ .name = "bo_count",
+@@ -1535,12 +1534,13 @@ static void ttm_bo_global_release(void)
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+
+ mutex_lock(&ttm_global_mutex);
+- if (--glob->use_count > 0)
++ if (--ttm_bo_glob_use_count > 0)
+ goto out;
+
+ kobject_del(&glob->kobj);
+ kobject_put(&glob->kobj);
+ ttm_mem_global_release(&ttm_mem_glob);
++ memset(glob, 0, sizeof(*glob));
+ out:
+ mutex_unlock(&ttm_global_mutex);
+ }
+@@ -1552,7 +1552,7 @@ static int ttm_bo_global_init(void)
+ unsigned i;
+
+ mutex_lock(&ttm_global_mutex);
+- if (++glob->use_count > 1)
++ if (++ttm_bo_glob_use_count > 1)
+ goto out;
+
+ ret = ttm_mem_global_init(&ttm_mem_glob);
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -461,8 +461,8 @@ out_no_zone:
+
+ void ttm_mem_global_release(struct ttm_mem_global *glob)
+ {
+- unsigned int i;
+ struct ttm_mem_zone *zone;
++ unsigned int i;
+
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_m
+ zone = glob->zones[i];
+ kobject_del(&zone->kobj);
+ kobject_put(&zone->kobj);
+- }
++ }
+ kobject_del(&glob->kobj);
+ kobject_put(&glob->kobj);
++ memset(glob, 0, sizeof(*glob));
+ }
+
+ static void ttm_check_swapping(struct ttm_mem_global *glob)
+--- a/include/drm/ttm/ttm_bo_driver.h
++++ b/include/drm/ttm/ttm_bo_driver.h
+@@ -411,7 +411,6 @@ extern struct ttm_bo_global {
+ /**
+ * Protected by ttm_global_mutex.
+ */
+- unsigned int use_count;
+ struct list_head device_list;
+
+ /**
--- /dev/null
+From 462ce5d963f18b71c63f6b7730a35a2ee5273540 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Wed, 24 Apr 2019 17:06:29 +0200
+Subject: drm/vc4: Fix compilation error reported by kbuild test bot
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit 462ce5d963f18b71c63f6b7730a35a2ee5273540 upstream.
+
+A pointer to crtc was missing, resulting in the following build error:
+drivers/gpu/drm/vc4/vc4_crtc.c:1045:44: sparse: sparse: incorrect type in argument 1 (different base types)
+drivers/gpu/drm/vc4/vc4_crtc.c:1045:44: sparse: expected struct drm_crtc *crtc
+drivers/gpu/drm/vc4/vc4_crtc.c:1045:44: sparse: got struct drm_crtc_state *state
+drivers/gpu/drm/vc4/vc4_crtc.c:1045:39: sparse: sparse: not enough arguments for function vc4_crtc_destroy_state
+
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reported-by: kbuild test robot <lkp@intel.com>
+Cc: Eric Anholt <eric@anholt.net>
+Link: https://patchwork.freedesktop.org/patch/msgid/2b6ed5e6-81b0-4276-8860-870b54ca3262@linux.intel.com
+Fixes: d08106796a78 ("drm/vc4: Fix memory leak during gpu reset.")
+Cc: <stable@vger.kernel.org> # v4.6+
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vc4/vc4_crtc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -999,7 +999,7 @@ static void
+ vc4_crtc_reset(struct drm_crtc *crtc)
+ {
+ if (crtc->state)
+- vc4_crtc_destroy_state(crtc->state);
++ vc4_crtc_destroy_state(crtc, crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+ if (crtc->state)
--- /dev/null
+From d08106796a78a4273e39e1bbdf538dc4334b2635 Mon Sep 17 00:00:00 2001
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Fri, 1 Mar 2019 13:56:11 +0100
+Subject: drm/vc4: Fix memory leak during gpu reset.
+
+From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+commit d08106796a78a4273e39e1bbdf538dc4334b2635 upstream.
+
+__drm_atomic_helper_crtc_destroy_state() does not free the state memory,
+it only cleans it up. Fix this by calling the driver's own destroy function.
+
+Fixes: 6d6e50039187 ("drm/vc4: Allocate the right amount of space for boot-time CRTC state.")
+Cc: Eric Anholt <eric@anholt.net>
+Cc: <stable@vger.kernel.org> # v4.6+
+Reviewed-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190301125627.7285-2-maarten.lankhorst@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vc4/vc4_crtc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -999,7 +999,7 @@ static void
+ vc4_crtc_reset(struct drm_crtc *crtc)
+ {
+ if (crtc->state)
+- __drm_atomic_helper_crtc_destroy_state(crtc->state);
++ vc4_crtc_destroy_state(crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+ if (crtc->state)
--- /dev/null
+From 7159a986b4202343f6cca3bb8079ecace5816fd6 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 21 Feb 2019 11:17:34 -0500
+Subject: ext4: fix some error pointer dereferences
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 7159a986b4202343f6cca3bb8079ecace5816fd6 upstream.
+
+We can't pass error pointers to brelse(), so clear the buffer_head
+pointer on error before the cleanup paths try to release it.
+
+Fixes: fb265c9cb49e ("ext4: add ext4_sb_bread() to disambiguate ENOMEM cases")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/xattr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *i
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
++ bh = NULL;
+ goto out;
+ }
+
+@@ -2903,6 +2904,7 @@ int ext4_xattr_delete_inode(handle_t *ha
+ if (error == -EIO)
+ EXT4_ERROR_INODE(inode, "block %llu read error",
+ EXT4_I(inode)->i_file_acl);
++ bh = NULL;
+ goto cleanup;
+ }
+ error = ext4_xattr_check_block(inode, bh);
+@@ -3059,6 +3061,7 @@ ext4_xattr_block_cache_find(struct inode
+ if (IS_ERR(bh)) {
+ if (PTR_ERR(bh) == -ENOMEM)
+ return NULL;
++ bh = NULL;
+ EXT4_ERROR_INODE(inode, "block %lu read error",
+ (unsigned long)ce->e_value);
+ } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
--- /dev/null
+From 3a349763cf11e63534b8f2d302f2d0c790566497 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Fri, 26 Apr 2019 17:22:01 -0700
+Subject: Input: synaptics-rmi4 - write config register values to the right offset
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit 3a349763cf11e63534b8f2d302f2d0c790566497 upstream.
+
+Currently any changed config register values don't take effect, as the
+function to write them back is called with the wrong register offset.
+
+Fixes: ff8f83708b3e ("Input: synaptics-rmi4 - add support for 2D sensors and F11")
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_f11.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/input/rmi4/rmi_f11.c
++++ b/drivers/input/rmi4/rmi_f11.c
+@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi
+ }
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+- &f11->dev_controls, fn->fd.query_base_addr);
++ &f11->dev_controls, fn->fd.control_base_addr);
+ if (rc)
+ dev_warn(&fn->dev, "Failed to write control registers\n");
+
--- /dev/null
+From e6abc8caa6deb14be2a206253f7e1c5e37e9515b Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Fri, 5 Apr 2019 08:54:37 -0700
+Subject: nfsd: Don't release the callback slot unless it was actually held
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+commit e6abc8caa6deb14be2a206253f7e1c5e37e9515b upstream.
+
+If there are multiple callbacks queued and waiting for the callback
+slot when the callback gets shut down, then they all currently
+end up acting as if they hold the slot, and call
+nfsd4_cb_sequence_done(), resulting in interesting side-effects.
+
+In addition, the 'retry_nowait' path in nfsd4_cb_sequence_done()
+causes a loop back to nfsd4_cb_prepare() without first freeing the
+slot, which causes a deadlock when nfsd41_cb_get_slot() gets called
+a second time.
+
+This patch therefore adds a boolean to track whether or not the
+callback did pick up the slot, so that it can do the right thing
+in these 2 cases.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4callback.c | 8 +++++++-
+ fs/nfsd/state.h | 1 +
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1023,8 +1023,9 @@ static void nfsd4_cb_prepare(struct rpc_
+ cb->cb_seq_status = 1;
+ cb->cb_status = 0;
+ if (minorversion) {
+- if (!nfsd41_cb_get_slot(clp, task))
++ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
+ return;
++ cb->cb_holds_slot = true;
+ }
+ rpc_call_start(task);
+ }
+@@ -1051,6 +1052,9 @@ static bool nfsd4_cb_sequence_done(struc
+ return true;
+ }
+
++ if (!cb->cb_holds_slot)
++ goto need_restart;
++
+ switch (cb->cb_seq_status) {
+ case 0:
+ /*
+@@ -1089,6 +1093,7 @@ static bool nfsd4_cb_sequence_done(struc
+ cb->cb_seq_status);
+ }
+
++ cb->cb_holds_slot = false;
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_wake_up_next(&clp->cl_cb_waitq);
+ dprintk("%s: freed slot, new seqid=%d\n", __func__,
+@@ -1296,6 +1301,7 @@ void nfsd4_init_cb(struct nfsd4_callback
+ cb->cb_seq_status = 1;
+ cb->cb_status = 0;
+ cb->cb_need_restart = false;
++ cb->cb_holds_slot = false;
+ }
+
+ void nfsd4_run_cb(struct nfsd4_callback *cb)
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -70,6 +70,7 @@ struct nfsd4_callback {
+ int cb_seq_status;
+ int cb_status;
+ bool cb_need_restart;
++ bool cb_holds_slot;
+ };
+
+ struct nfsd4_callback_ops {
--- /dev/null
+From f456458e4d25a8962d0946891617c76cc3ff5fb9 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Mon, 22 Apr 2019 12:34:24 -0400
+Subject: nfsd: wake blocked file lock waiters before sending callback
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit f456458e4d25a8962d0946891617c76cc3ff5fb9 upstream.
+
+When a blocked NFS lock is "awoken" we send a callback to the client and
+then wake any hosts waiting on it. If a client attempts to get a lock
+and then drops off the net, we could end up waiting for a long time
+before we wake the locks blocked on that request.
+
+So, wake any other waiting lock requests before sending the callback.
+Do this by calling locks_delete_block in a new "prepare" phase for
+CB_NOTIFY_LOCK callbacks.
+
+URL: https://bugzilla.kernel.org/show_bug.cgi?id=203363
+Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+Reported-by: Slawomir Pryczek <slawek1211@gmail.com>
+Cc: Neil Brown <neilb@suse.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4state.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -298,6 +298,14 @@ remove_blocked_locks(struct nfs4_lockown
+ }
+ }
+
++static void
++nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
++{
++ struct nfsd4_blocked_lock *nbl = container_of(cb,
++ struct nfsd4_blocked_lock, nbl_cb);
++ locks_delete_block(&nbl->nbl_lock);
++}
++
+ static int
+ nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
+ {
+@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd
+ }
+
+ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
++ .prepare = nfsd4_cb_notify_lock_prepare,
+ .done = nfsd4_cb_notify_lock_done,
+ .release = nfsd4_cb_notify_lock_release,
+ };
--- /dev/null
+From 6aaafc43a4ecc5bc8a3f6a2811d5eddc996a97f3 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Mon, 22 Apr 2019 12:34:23 -0400
+Subject: nfsd: wake waiters blocked on file_lock before deleting it
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 6aaafc43a4ecc5bc8a3f6a2811d5eddc996a97f3 upstream.
+
+After a blocked nfsd file_lock request is deleted, knfsd will send a
+callback to the client and then free the request. Commit 16306a61d3b7
+("fs/locks: always delete_block after waiting.") changed it such that
+locks_delete_block is always called on a request after it is awoken,
+but that patch missed fixing up blocked nfsd request handling.
+
+Call locks_delete_block on the block to wake up any locks still blocked
+on the nfsd lock request before freeing it. Some of its callers already
+do this however, so just remove those calls.
+
+URL: https://bugzilla.kernel.org/show_bug.cgi?id=203363
+Fixes: 16306a61d3b7 ("fs/locks: always delete_block after waiting.")
+Reported-by: Slawomir Pryczek <slawek1211@gmail.com>
+Cc: Neil Brown <neilb@suse.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4state.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_locko
+ static void
+ free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+ {
++ locks_delete_block(&nbl->nbl_lock);
+ locks_release_private(&nbl->nbl_lock);
+ kfree(nbl);
+ }
+@@ -293,7 +294,6 @@ remove_blocked_locks(struct nfs4_lockown
+ nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
+ nbl_lru);
+ list_del_init(&nbl->nbl_lru);
+- locks_delete_block(&nbl->nbl_lock);
+ free_blocked_lock(nbl);
+ }
+ }
+@@ -4863,7 +4863,6 @@ nfs4_laundromat(struct nfsd_net *nn)
+ nbl = list_first_entry(&reaplist,
+ struct nfsd4_blocked_lock, nbl_lru);
+ list_del_init(&nbl->nbl_lru);
+- locks_delete_block(&nbl->nbl_lock);
+ free_blocked_lock(nbl);
+ }
+ out:
--- /dev/null
+From 82c99f7a81f28f8c1be5f701c8377d14c4075b10 Mon Sep 17 00:00:00 2001
+From: Harry Pan <harry.pan@intel.com>
+Date: Wed, 24 Apr 2019 22:50:33 +0800
+Subject: perf/x86/intel: Update KBL Package C-state events to also include PC8/PC9/PC10 counters
+
+From: Harry Pan <harry.pan@intel.com>
+
+commit 82c99f7a81f28f8c1be5f701c8377d14c4075b10 upstream.
+
+Kaby Lake (and Coffee Lake) has PC8/PC9/PC10 residency counters.
+
+This patch switches the Kaby/Coffee Lake PMU event list from
+snb_cstates[] to hswult_cstates[], which keeps all previously
+supported events and also adds the PKG_C8, PKG_C9 and PKG_C10
+residency counters.
+
+This allows user space tools to profile them through the perf interface.
+
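+As a rough usage sketch, once the Kaby Lake entries point at
+hswult_cstates[], the extra package counters should show up as
+cstate_pkg PMU events (the exact event names are whatever `perf list`
+reports on the machine), e.g.:
+
+  perf stat -a -e cstate_pkg/c8-residency/,cstate_pkg/c9-residency/,cstate_pkg/c10-residency/ sleep 10
+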
+Signed-off-by: Harry Pan <harry.pan@intel.com>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: gs0622@gmail.com
+Link: http://lkml.kernel.org/r/20190424145033.1924-1-harry.pan@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/cstate.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -76,15 +76,15 @@
+ * Scope: Package (physical package)
+ * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
+ * perf code: 0x04
+- * Available model: HSW ULT,CNL
++ * Available model: HSW ULT,KBL,CNL
+ * Scope: Package (physical package)
+ * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
+ * perf code: 0x05
+- * Available model: HSW ULT,CNL
++ * Available model: HSW ULT,KBL,CNL
+ * Scope: Package (physical package)
+ * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+ * perf code: 0x06
+- * Available model: HSW ULT,GLM,CNL
++ * Available model: HSW ULT,KBL,GLM,CNL
+ * Scope: Package (physical package)
+ *
+ */
+@@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cst
+ X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
+
+- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
+- X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
+
--- /dev/null
+From 8adddf349fda0d3de2f6bb41ddf838cbf36a8ad2 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Tue, 16 Apr 2019 23:59:02 +1000
+Subject: powerpc/mm/radix: Make Radix require HUGETLB_PAGE
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 8adddf349fda0d3de2f6bb41ddf838cbf36a8ad2 upstream.
+
+Joel reported weird crashes using skiroot_defconfig, in his case we
+jumped into an NX page:
+
+ kernel tried to execute exec-protected page (c000000002bff4f0) - exploit attempt? (uid: 0)
+ BUG: Unable to handle kernel instruction fetch
+ Faulting instruction address: 0xc000000002bff4f0
+
+Looking at the disassembly, we had simply branched to that address:
+
+ c000000000c001bc 49fff335 bl c000000002bff4f0
+
+But that didn't match the original kernel image:
+
+ c000000000c001bc 4bfff335 bl c000000000bff4f0 <kobject_get+0x8>
+
+When STRICT_KERNEL_RWX is enabled, and we're using the radix MMU, we
+call radix__change_memory_range() late in boot to change page
+protections. We do that both to mark rodata read only and also to mark
+init text no-execute. That involves walking the kernel page tables,
+and clearing _PAGE_WRITE or _PAGE_EXEC respectively.
+
+With radix we may use hugepages for the linear mapping, so the code in
+radix__change_memory_range() uses e.g. pmd_huge() to test if it has
+found a huge mapping, and if so it stops the page table walk and
+changes the PMD permissions.
+
+However if the kernel is built without HUGETLBFS support, pmd_huge()
+is just a #define that always returns 0. That causes the code in
+radix__change_memory_range() to incorrectly interpret the PMD value as
+a pointer to a PTE page rather than as a PTE at the PMD level.
+
+We can see this using `dv` in xmon which also uses pmd_huge():
+
+ 0:mon> dv c000000000000000
+ pgd @ 0xc000000001740000
+ pgdp @ 0xc000000001740000 = 0x80000000ffffb009
+ pudp @ 0xc0000000ffffb000 = 0x80000000ffffa009
+ pmdp @ 0xc0000000ffffa000 = 0xc00000000000018f <- this is a PTE
+ ptep @ 0xc000000000000100 = 0xa64bb17da64ab07d <- kernel text
+
+The end result is we treat the value at 0xc000000000000100 as a PTE
+and clear _PAGE_WRITE or _PAGE_EXEC, potentially corrupting the code
+at that address.
+
+In Joel's specific case we cleared the sign bit in the offset of the
+branch, causing a backward branch to turn into a forward branch which
+caused us to branch into a non-executable page. However the exact
+nature of the crash depends on kernel version, compiler version, and
+other factors.
+
+We need to fix radix__change_memory_range() to not use accessors that
+depend on HUGETLBFS, but we also have radix memory hotplug code that
+uses pmd_huge() etc that will also need fixing. So for now just
+disallow the broken combination of Radix with HUGETLBFS disabled.
+
+The only defconfig we have that is affected is skiroot_defconfig, so
+turn on HUGETLBFS there so that it still gets Radix.
+
+Fixes: 566ca99af026 ("powerpc/mm/radix: Add dummy radix_enabled()")
+Cc: stable@vger.kernel.org # v4.7+
+Reported-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/configs/skiroot_defconfig | 1 +
+ arch/powerpc/platforms/Kconfig.cputype | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/configs/skiroot_defconfig
++++ b/arch/powerpc/configs/skiroot_defconfig
+@@ -260,6 +260,7 @@ CONFIG_UDF_FS=m
+ CONFIG_MSDOS_FS=m
+ CONFIG_VFAT_FS=m
+ CONFIG_PROC_KCORE=y
++CONFIG_HUGETLBFS=y
+ # CONFIG_MISC_FILESYSTEMS is not set
+ # CONFIG_NETWORK_FILESYSTEMS is not set
+ CONFIG_NLS=y
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -318,7 +318,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+
+ config PPC_RADIX_MMU
+ bool "Radix MMU Support"
+- depends on PPC_BOOK3S_64
++ depends on PPC_BOOK3S_64 && HUGETLB_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+ default y
+ help
--- /dev/null
+From 9fa246256e09dc30820524401cdbeeaadee94025 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Wed, 24 Apr 2019 10:47:56 +1000
+Subject: Revert "drm/i915/fbdev: Actually configure untiled displays"
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 9fa246256e09dc30820524401cdbeeaadee94025 upstream.
+
+This reverts commit d179b88deb3bf6fed4991a31fd6f0f2cad21fab5.
+
+This commit is documented to break the userspace X.org modesetting
+driver in certain configurations.
+
+The X.org modesetting userspace driver is broken, and no fixes are
+available yet. In order for the reverted patch to be applied again, it
+either needs a config option or a workaround to be developed.
+
+This has been reported a few times; saying it's a userspace problem is
+clearly against the regression rules.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109806
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Cc: <stable@vger.kernel.org> # v3.19+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_fbdev.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(stru
+ bool *enabled, int width, int height)
+ {
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
++ unsigned long conn_configured, conn_seq, mask;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+- unsigned long conn_configured, conn_seq;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+@@ -355,9 +355,10 @@ static bool intel_fb_initial_config(stru
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+- conn_seq = GENMASK(count - 1, 0);
++ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+ retry:
++ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+@@ -370,8 +371,7 @@ retry:
+ if (conn_configured & BIT(i))
+ continue;
+
+- /* First pass, only consider tiled connectors */
+- if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
++ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+@@ -475,10 +475,8 @@ retry:
+ conn_configured |= BIT(i);
+ }
+
+- if (conn_configured != conn_seq) { /* repeat until no more are found */
+- conn_seq = conn_configured;
++ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+- }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to have the
rdma-mlx5-use-rdma_user_map_io-for-mapping-bar-pages.patch
rdma-ucontext-fix-regression-with-disassociate.patch
sched-numa-fix-a-possible-divide-by-zero.patch
+ceph-only-use-d_name-directly-when-parent-is-locked.patch
+ceph-ensure-d_name-stability-in-ceph_dentry_hash.patch
+ceph-fix-ci-i_head_snapc-leak.patch
+nfsd-don-t-release-the-callback-slot-unless-it-was-actually-held.patch
+nfsd-wake-waiters-blocked-on-file_lock-before-deleting-it.patch
+nfsd-wake-blocked-file-lock-waiters-before-sending-callback.patch
+sunrpc-don-t-mark-uninitialised-items-as-valid.patch
+perf-x86-intel-update-kbl-package-c-state-events-to-also-include-pc8-pc9-pc10-counters.patch
+input-synaptics-rmi4-write-config-register-values-to-the-right-offset.patch
+vfio-type1-limit-dma-mappings-per-container.patch
+dmaengine-sh-rcar-dmac-with-cyclic-dma-residue-0-is-valid.patch
+dmaengine-sh-rcar-dmac-fix-glitch-in-dmaengine_tx_status.patch
+dmaengine-mediatek-cqdma-fix-wrong-register-usage-in-mtk_cqdma_start.patch
+arm-8857-1-efi-enable-cp15-dmb-instructions-before-cleaning-the-cache.patch
+powerpc-mm-radix-make-radix-require-hugetlb_page.patch
+drm-vc4-fix-memory-leak-during-gpu-reset.patch
+drm-ttm-fix-re-init-of-global-structures.patch
+revert-drm-i915-fbdev-actually-configure-untiled-displays.patch
+drm-vc4-fix-compilation-error-reported-by-kbuild-test-bot.patch
+usb-add-new-usb-lpm-helpers.patch
+usb-consolidate-lpm-checks-to-avoid-enabling-lpm-twice.patch
+ext4-fix-some-error-pointer-dereferences.patch
--- /dev/null
+From d58431eacb226222430940134d97bfd72f292fcd Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Fri, 5 Apr 2019 11:34:40 +1100
+Subject: sunrpc: don't mark uninitialised items as VALID.
+
+From: NeilBrown <neilb@suse.com>
+
+commit d58431eacb226222430940134d97bfd72f292fcd upstream.
+
+A recent commit added a call to cache_fresh_locked()
+when an expired item was found.
+The call sets the CACHE_VALID flag, so it is important
+that the item actually is valid.
+There are two ways it could be valid:
+1/ if ->update has been called to fill in relevant content, or
+2/ if CACHE_NEGATIVE is set, to say that content doesn't exist.
+
+An expired item that is waiting for an update will be neither.
+Setting CACHE_VALID means that a subsequent call to cache_put()
+is likely to dereference uninitialised pointers.
+
+So we must make sure the item is valid, and we already have code to do
+that in try_to_negate_entry(). This takes the hash lock and so cannot
+be used directly, so take out the two lines that we need and use them.
+
+Now cache_fresh_locked() is certain to be called only on
+a valid item.
+
+Cc: stable@kernel.org # 2.6.35
+Fixes: 4ecd55ea0742 ("sunrpc: fix cache_head leak due to queued request")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/cache.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -54,6 +54,7 @@ static void cache_init(struct cache_head
+ h->last_refresh = now;
+ }
+
++static inline int cache_is_valid(struct cache_head *h);
+ static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+ struct cache_detail *detail);
+ static void cache_fresh_unlocked(struct cache_head *head,
+@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_a
+ if (cache_is_expired(detail, tmp)) {
+ hlist_del_init_rcu(&tmp->cache_list);
+ detail->entries --;
++ if (cache_is_valid(tmp) == -EAGAIN)
++ set_bit(CACHE_NEGATIVE, &tmp->flags);
+ cache_fresh_locked(tmp, 0, detail);
+ freeme = tmp;
+ break;
--- /dev/null
+From 7529b2574a7aaf902f1f8159fbc2a7caa74be559 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Sat, 12 Jan 2019 03:54:24 +0800
+Subject: USB: Add new USB LPM helpers
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit 7529b2574a7aaf902f1f8159fbc2a7caa74be559 upstream.
+
+Use new helpers to make LPM enabling/disabling clearer.
+
+This is a preparation for the subsequent patch.
+
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: stable <stable@vger.kernel.org> # after much soaking
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/core/driver.c | 12 +++++++++++-
+ drivers/usb/core/hub.c | 12 ++++++------
+ drivers/usb/core/message.c | 2 +-
+ drivers/usb/core/sysfs.c | 5 ++++-
+ drivers/usb/core/usb.h | 10 ++++++++--
+ 5 files changed, 30 insertions(+), 11 deletions(-)
+
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1896,7 +1896,7 @@ int usb_runtime_idle(struct device *dev)
+ return -EBUSY;
+ }
+
+-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
++static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+ {
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+@@ -1913,6 +1913,16 @@ int usb_set_usb2_hardware_lpm(struct usb
+ return ret;
+ }
+
++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return usb_set_usb2_hardware_lpm(udev, 1);
++}
++
++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return usb_set_usb2_hardware_lpm(udev, 0);
++}
++
+ #endif /* CONFIG_PM */
+
+ struct bus_type usb_bus_type = {
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3221,7 +3221,7 @@ int usb_port_suspend(struct usb_device *
+
+ /* disable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ if (usb_disable_ltm(udev)) {
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
+@@ -3260,7 +3260,7 @@ int usb_port_suspend(struct usb_device *
+ err_ltm:
+ /* Try to enable USB2 hardware LPM again */
+ if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+@@ -3544,7 +3544,7 @@ int usb_port_resume(struct usb_device *u
+ } else {
+ /* Try to enable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ /* Try to enable USB3 LTM */
+ usb_enable_ltm(udev);
+@@ -4435,7 +4435,7 @@ static void hub_set_initial_usb2_lpm_pol
+ if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
+ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ udev->usb2_hw_lpm_allowed = 1;
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ }
+ }
+
+@@ -5650,7 +5650,7 @@ static int usb_reset_and_verify_device(s
+ * It will be re-enabled by the enumeration process.
+ */
+ if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ /* Disable LPM while we reset the device and reinstall the alt settings.
+ * Device-initiated LPM, and system exit latency settings are cleared
+@@ -5753,7 +5753,7 @@ static int usb_reset_and_verify_device(s
+
+ done:
+ /* Now that the alt settings are re-installed, enable LTM and LPM. */
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ usb_unlocked_enable_lpm(udev);
+ usb_enable_ltm(udev);
+ usb_release_bos_descriptor(udev);
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1244,7 +1244,7 @@ void usb_disable_device(struct usb_devic
+ }
+
+ if (dev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(dev, 0);
++ usb_disable_usb2_hardware_lpm(dev);
+ usb_unlocked_disable_lpm(dev);
+ usb_disable_ltm(dev);
+
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(s
+
+ if (!ret) {
+ udev->usb2_hw_lpm_allowed = value;
+- ret = usb_set_usb2_hardware_lpm(udev, value);
++ if (value)
++ ret = usb_enable_usb2_hardware_lpm(udev);
++ else
++ ret = usb_disable_usb2_hardware_lpm(udev);
+ }
+
+ usb_unlock_device(udev);
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_
+ extern int usb_runtime_suspend(struct device *dev);
+ extern int usb_runtime_resume(struct device *dev);
+ extern int usb_runtime_idle(struct device *dev);
+-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
+
+ #else
+
+@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(
+ return 0;
+ }
+
+-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return 0;
++}
++
++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+ {
+ return 0;
+ }
--- /dev/null
+From d7a6c0ce8d26412903c7981503bad9e1cc7c45d2 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Sat, 12 Jan 2019 03:54:25 +0800
+Subject: USB: Consolidate LPM checks to avoid enabling LPM twice
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit d7a6c0ce8d26412903c7981503bad9e1cc7c45d2 upstream.
+
+USB Bluetooth controller QCA ROME (0cf3:e007) sometimes stops working
+after S3:
+[ 165.110742] Bluetooth: hci0: using NVM file: qca/nvm_usb_00000302.bin
+[ 168.432065] Bluetooth: hci0: Failed to send body at 4 of 1953 (-110)
+
+After some experiments, I found that disabling LPM can work around the
+issue.
+
+On some platforms, the USB power is cut during S3, so the driver uses
+reset-resume to resume the device. During port resume, LPM gets enabled
+twice, by usb_reset_and_verify_device() and usb_port_resume().
+
+Consolidate all checks into new LPM helpers to make sure LPM only gets
+enabled once.
+
+Fixes: de68bab4fa96 ("usb: Don't enable USB 2.0 Link PM by default.")
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: stable <stable@vger.kernel.org> # after much soaking
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/core/driver.c | 11 ++++++++---
+ drivers/usb/core/hub.c | 12 ++++--------
+ drivers/usb/core/message.c | 3 +--
+ 3 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1901,9 +1901,6 @@ static int usb_set_usb2_hardware_lpm(str
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+
+- if (enable && !udev->usb2_hw_lpm_allowed)
+- return 0;
+-
+ if (hcd->driver->set_usb2_hw_lpm) {
+ ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
+ if (!ret)
+@@ -1915,11 +1912,19 @@ static int usb_set_usb2_hardware_lpm(str
+
+ int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+ {
++ if (!udev->usb2_hw_lpm_capable ||
++ !udev->usb2_hw_lpm_allowed ||
++ udev->usb2_hw_lpm_enabled)
++ return 0;
++
+ return usb_set_usb2_hardware_lpm(udev, 1);
+ }
+
+ int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+ {
++ if (!udev->usb2_hw_lpm_enabled)
++ return 0;
++
+ return usb_set_usb2_hardware_lpm(udev, 0);
+ }
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3220,8 +3220,7 @@ int usb_port_suspend(struct usb_device *
+ }
+
+ /* disable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(udev);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ if (usb_disable_ltm(udev)) {
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
+@@ -3259,8 +3258,7 @@ int usb_port_suspend(struct usb_device *
+ usb_enable_ltm(udev);
+ err_ltm:
+ /* Try to enable USB2 hardware LPM again */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_enable_usb2_hardware_lpm(udev);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+@@ -3543,8 +3541,7 @@ int usb_port_resume(struct usb_device *u
+ hub_port_logical_disconnect(hub, port1);
+ } else {
+ /* Try to enable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_enable_usb2_hardware_lpm(udev);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ /* Try to enable USB3 LTM */
+ usb_enable_ltm(udev);
+@@ -5649,8 +5646,7 @@ static int usb_reset_and_verify_device(s
+ /* Disable USB2 hardware LPM.
+ * It will be re-enabled by the enumeration process.
+ */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(udev);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ /* Disable LPM while we reset the device and reinstall the alt settings.
+ * Device-initiated LPM, and system exit latency settings are cleared
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_devic
+ dev->actconfig->interface[i] = NULL;
+ }
+
+- if (dev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(dev);
++ usb_disable_usb2_hardware_lpm(dev);
+ usb_unlocked_disable_lpm(dev);
+ usb_disable_ltm(dev);
+
--- /dev/null
+From 492855939bdb59c6f947b0b5b44af9ad82b7e38c Mon Sep 17 00:00:00 2001
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Wed, 3 Apr 2019 12:36:21 -0600
+Subject: vfio/type1: Limit DMA mappings per container
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit 492855939bdb59c6f947b0b5b44af9ad82b7e38c upstream.
+
+Memory backed DMA mappings are accounted against a user's locked
+memory limit, including multiple mappings of the same memory. This
+accounting bounds the number of such mappings that a user can create.
+However, DMA mappings that are not backed by memory, such as DMA
+mappings of device MMIO via mmaps, do not make use of page pinning
+and therefore do not count against the user's locked memory limit.
+These mappings still consume memory, but the memory is not well
+associated with the process for the purpose of oom killing a task.
+
+To add bounding on this use case, we introduce a limit to the total
+number of concurrent DMA mappings that a user is allowed to create.
+This limit is exposed as a tunable module option where the default
+value of 64K is expected to be well in excess of any reasonable use
+case (a large virtual machine configuration would typically only make
+use of tens of concurrent mappings).
+
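+As a rough usage sketch (assuming the type1 backend is built as the
+vfio_iommu_type1 module; the value below is arbitrary), the limit can
+be tuned like this:
+
+  # raise the limit at module load time
+  modprobe vfio_iommu_type1 dma_entry_limit=131072
+  # or later, via the writable module parameter
+  echo 131072 > /sys/module/vfio_iommu_type1/parameters/dma_entry_limit
+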
+This fixes CVE-2019-3882.
+
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Tested-by: Eric Auger <eric.auger@redhat.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/vfio_iommu_type1.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
+ MODULE_PARM_DESC(disable_hugepages,
+ "Disable VFIO IOMMU support for IOMMU hugepages.");
+
++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
++MODULE_PARM_DESC(dma_entry_limit,
++ "Maximum number of user DMA mappings per container (65535).");
++
+ struct vfio_iommu {
+ struct list_head domain_list;
+ struct vfio_domain *external_domain; /* domain for external user */
+ struct mutex lock;
+ struct rb_root dma_list;
+ struct blocking_notifier_head notifier;
++ unsigned int dma_avail;
+ bool v2;
+ bool nesting;
+ };
+@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_
+ vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
+ kfree(dma);
++ iommu->dma_avail++;
+ }
+
+ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_i
+ goto out_unlock;
+ }
+
++ if (!iommu->dma_avail) {
++ ret = -ENOSPC;
++ goto out_unlock;
++ }
++
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
++ iommu->dma_avail--;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+ dma->prot = prot;
+@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsig
+
+ INIT_LIST_HEAD(&iommu->domain_list);
+ iommu->dma_list = RB_ROOT;
++ iommu->dma_avail = dma_entry_limit;
+ mutex_init(&iommu->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+