--- /dev/null
+From a45ea48e2bcd92c1f678b794f488ca0bda9835b8 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Sun, 26 Jan 2020 01:02:53 +0000
+Subject: afs: Fix characters allowed into cell names
+
+From: David Howells <dhowells@redhat.com>
+
+commit a45ea48e2bcd92c1f678b794f488ca0bda9835b8 upstream.
+
+The afs filesystem needs to prohibit certain characters from cell names,
+such as '/', as these are used to form filenames in procfs, leading to
+the following warning being generated:
+
+ WARNING: CPU: 0 PID: 3489 at fs/proc/generic.c:178
+
+Fix afs_alloc_cell() to disallow names that contain non-printable
+characters, '/' or '@', as well as names that begin with a dot.
+
+Remove the check for "@cell" as that is then redundant.
+
+This can be tested by running:
+
+ echo add foo/.bar 1.2.3.4 >/proc/fs/afs/cells
+
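+The rule being enforced can be sketched as a standalone C predicate (an
+illustrative helper mirroring the kernel check below, not kernel code):
+
+  #include <ctype.h>
+  #include <stdbool.h>
+  #include <stddef.h>
+
+  /* Return true if 'name' is an acceptable cell name. */
+  static bool cell_name_valid(const char *name, size_t namelen)
+  {
+          size_t i;
+
+          if (namelen == 0 || name[0] == '.')
+                  return false;
+          for (i = 0; i < namelen; i++)
+                  if (!isprint((unsigned char)name[i]) ||
+                      name[i] == '/' || name[i] == '@')
+                          return false;
+          return true;
+  }
+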
+Note that we will also need to deal with:
+
+ - Names ending in ".invalid" shouldn't be passed to the DNS.
+
+ - Names that contain non-valid domainname chars shouldn't be passed to
+ the DNS.
+
+ - DNS replies that say "your-dns-needs-immediate-attention.<gTLD>" and
+ replies containing A records that say 127.0.53.53 should be
+ considered invalid.
+ [https://www.icann.org/en/system/files/files/name-collision-mitigation-01aug14-en.pdf]
+
+but these need to be dealt with by the kafs-client DNS program rather
+than the kernel.
+
+Reported-by: syzbot+b904ba7c947a37b4b291@syzkaller.appspotmail.com
+Cc: stable@kernel.org
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/afs/cell.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(s
+ _leave(" = -ENAMETOOLONG");
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+- if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
++
++ /* Prohibit cell names that contain unprintable chars, '/' and '@' or
++ * that begin with a dot. This also precludes "@cell".
++ */
++ if (name[0] == '.')
+ return ERR_PTR(-EINVAL);
++ for (i = 0; i < namelen; i++) {
++ char ch = name[i];
++ if (!isprint(ch) || ch == '/' || ch == '@')
++ return ERR_PTR(-EINVAL);
++ }
+
+ _enter("%*.*s,%s", namelen, namelen, name, addresses);
+
--- /dev/null
+From 927d780ee371d7e121cea4fc7812f6ef2cea461c Mon Sep 17 00:00:00 2001
+From: Alex Sverdlin <alexander.sverdlin@nokia.com>
+Date: Wed, 8 Jan 2020 15:57:47 +0100
+Subject: ARM: 8950/1: ftrace/recordmcount: filter relocation types
+
+From: Alex Sverdlin <alexander.sverdlin@nokia.com>
+
+commit 927d780ee371d7e121cea4fc7812f6ef2cea461c upstream.
+
+Scenario 1, ARMv7
+=================
+
+If the code in arch/arm/kernel/ftrace.c were to operate on the mcount()
+pointer, the following may be generated:
+
+00000230 <prealloc_fixed_plts>:
+ 230: b5f8 push {r3, r4, r5, r6, r7, lr}
+ 232: b500 push {lr}
+ 234: f7ff fffe bl 0 <__gnu_mcount_nc>
+ 234: R_ARM_THM_CALL __gnu_mcount_nc
+ 238: f240 0600 movw r6, #0
+ 238: R_ARM_THM_MOVW_ABS_NC __gnu_mcount_nc
+ 23c: f8d0 1180 ldr.w r1, [r0, #384] ; 0x180
+
+FTRACE currently is not able to deal with it:
+
+WARNING: CPU: 0 PID: 0 at .../kernel/trace/ftrace.c:1979 ftrace_bug+0x1ad/0x230()
+...
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.116-... #1
+...
+[<c0314e3d>] (unwind_backtrace) from [<c03115e9>] (show_stack+0x11/0x14)
+[<c03115e9>] (show_stack) from [<c051a7f1>] (dump_stack+0x81/0xa8)
+[<c051a7f1>] (dump_stack) from [<c0321c5d>] (warn_slowpath_common+0x69/0x90)
+[<c0321c5d>] (warn_slowpath_common) from [<c0321cf3>] (warn_slowpath_null+0x17/0x1c)
+[<c0321cf3>] (warn_slowpath_null) from [<c038ee9d>] (ftrace_bug+0x1ad/0x230)
+[<c038ee9d>] (ftrace_bug) from [<c038f1f9>] (ftrace_process_locs+0x27d/0x444)
+[<c038f1f9>] (ftrace_process_locs) from [<c08915bd>] (ftrace_init+0x91/0xe8)
+[<c08915bd>] (ftrace_init) from [<c0885a67>] (start_kernel+0x34b/0x358)
+[<c0885a67>] (start_kernel) from [<00308095>] (0x308095)
+---[ end trace cb88537fdc8fa200 ]---
+ftrace failed to modify [<c031266c>] prealloc_fixed_plts+0x8/0x60
+ actual: 44:f2:e1:36
+ftrace record flags: 0
+ (0) expected tramp: c03143e9
+
+Scenario 2, ARMv4T
+==================
+
+ftrace: allocating 14435 entries in 43 pages
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2029 ftrace_bug+0x204/0x310
+CPU: 0 PID: 0 Comm: swapper Not tainted 4.19.5 #1
+Hardware name: Cirrus Logic EDB9302 Evaluation Board
+[<c0010a24>] (unwind_backtrace) from [<c000ecb0>] (show_stack+0x20/0x2c)
+[<c000ecb0>] (show_stack) from [<c03c72e8>] (dump_stack+0x20/0x30)
+[<c03c72e8>] (dump_stack) from [<c0021c18>] (__warn+0xdc/0x104)
+[<c0021c18>] (__warn) from [<c0021d7c>] (warn_slowpath_null+0x4c/0x5c)
+[<c0021d7c>] (warn_slowpath_null) from [<c0095360>] (ftrace_bug+0x204/0x310)
+[<c0095360>] (ftrace_bug) from [<c04dabac>] (ftrace_init+0x3b4/0x4d4)
+[<c04dabac>] (ftrace_init) from [<c04cef4c>] (start_kernel+0x20c/0x410)
+[<c04cef4c>] (start_kernel) from [<00000000>] ( (null))
+---[ end trace 0506a2f5dae6b341 ]---
+ftrace failed to modify
+[<c000c350>] perf_trace_sys_exit+0x5c/0xe8
+ actual: 1e:ff:2f:e1
+Initializing ftrace call sites
+ftrace record flags: 0
+ (0)
+ expected tramp: c000fb24
+
+The analysis of this problem has already been performed; refer to the
+link below.
+
+Fix the above problems by allowing only selected reloc types in
+__mcount_loc. The list itself comes from the legacy recordmcount.pl
+script.
+
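+For reference, the filter keys off the relocation type, which lives in
+the low byte of r_info per the standard ELF32 accessors (sketch):
+
+  #define ELF32_R_SYM(info)   ((info) >> 8)    /* symbol table index */
+  #define ELF32_R_TYPE(info)  ((info) & 0xff)  /* relocation type    */
+
+Only R_ARM_PC24, R_ARM_THM_CALL and R_ARM_CALL denote actual calls to
+mcount; relocations such as R_ARM_THM_MOVW_ABS_NC merely reference its
+address and must not be recorded in __mcount_loc.
+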
+Link: https://lore.kernel.org/lkml/56961010.6000806@pengutronix.de/
+Cc: stable@vger.kernel.org
+Fixes: ed60453fa8f8 ("ARM: 6511/1: ftrace: add ARM support for C version of recordmcount")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -38,6 +38,10 @@
+ #define R_AARCH64_ABS64 257
+ #endif
+
++#define R_ARM_PC24 1
++#define R_ARM_THM_CALL 10
++#define R_ARM_CALL 28
++
+ static int fd_map; /* File descriptor for file being modified. */
+ static int mmap_failed; /* Boolean flag. */
+ static char gpfx; /* prefix for global symbol name (sometimes '_') */
+@@ -418,6 +422,18 @@ static char const *already_has_rel_mcoun
+ #define RECORD_MCOUNT_64
+ #include "recordmcount.h"
+
++static int arm_is_fake_mcount(Elf32_Rel const *rp)
++{
++ switch (ELF32_R_TYPE(w(rp->r_info))) {
++ case R_ARM_THM_CALL:
++ case R_ARM_CALL:
++ case R_ARM_PC24:
++ return 0;
++ }
++
++ return 1;
++}
++
+ /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
+ * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
+ * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
+@@ -523,6 +539,7 @@ static int do_file(char const *const fna
+ altmcount = "__gnu_mcount_nc";
+ make_nop = make_nop_arm;
+ rel_type_nop = R_ARM_NONE;
++ is_fake_mcount32 = arm_is_fake_mcount;
+ gpfx = 0;
+ break;
+ case EM_AARCH64:
--- /dev/null
+From 9c1c2b35f1d94de8325344c2777d7ee67492db3b Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Wed, 3 Apr 2019 13:16:01 -0400
+Subject: ceph: hold extra reference to r_parent over life of request
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 9c1c2b35f1d94de8325344c2777d7ee67492db3b upstream.
+
+Currently, we just assume that r_parent will stick around by virtue of
+the submitter's reference, but later patches will allow the syscall to
+return early, and we can't rely on that reference at that point.
+
+While I'm not aware of any reports of it, Xiubo pointed out that this
+may fix a use-after-free. If the wait for a reply times out or is
+canceled via signal, and then the reply comes in after the syscall
+returns, the client can end up trying to access r_parent without a
+reference.
+
+Take an extra reference to the inode when setting r_parent and release
+it when releasing the request.
+
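+A sketch of the resulting pairing (illustrative, simplified from the
+mds_client.c changes below):
+
+  /* submit: pin the parent inode for the request's lifetime */
+  if (req->r_parent)
+          ihold(req->r_parent);
+
+  /* release: drop the pin when the request itself goes away */
+  if (req->r_parent)
+          ceph_async_iput(req->r_parent);
+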
+Cc: stable@vger.kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/mds_client.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kr
+ /* avoid calling iput_final() in mds dispatch threads */
+ ceph_async_iput(req->r_inode);
+ }
+- if (req->r_parent)
++ if (req->r_parent) {
+ ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
++ ceph_async_iput(req->r_parent);
++ }
+ ceph_async_iput(req->r_target_inode);
+ if (req->r_dentry)
+ dput(req->r_dentry);
+@@ -2670,8 +2672,10 @@ int ceph_mdsc_submit_request(struct ceph
+ /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
+ if (req->r_inode)
+ ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
+- if (req->r_parent)
++ if (req->r_parent) {
+ ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
++ ihold(req->r_parent);
++ }
+ if (req->r_old_dentry_dir)
+ ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
+ CEPH_CAP_PIN);
--- /dev/null
+From 5eec71829ad7749a8c918f66a91a9bcf6fb4462a Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Date: Thu, 16 Jan 2020 13:45:08 +0000
+Subject: drm/i915: Align engine->uabi_class/instance with i915_drm.h
+
+From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+
+commit 5eec71829ad7749a8c918f66a91a9bcf6fb4462a upstream.
+
+In our ABI we have defined I915_ENGINE_CLASS_INVALID_NONE and
+I915_ENGINE_CLASS_INVALID_VIRTUAL as negative values, which creates an
+implicit coupling with the type widths used in struct
+i915_engine_class_instance, which is also ABI.
+
+One place where we export engine->uabi_class as
+I915_ENGINE_CLASS_INVALID_VIRTUAL is from our tracepoints. Because the
+type of the former is u8, in contrast to the u16 defined in the ABI, 254
+will be returned instead of the 65534 which userspace would legitimately
+expect.
+
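+A minimal illustration of the truncation (assuming the uAPI value
+I915_ENGINE_CLASS_INVALID_VIRTUAL == -2):
+
+  u8  narrow = (u8)-2;   /* 254, what the tracepoint emitted */
+  u16 wide   = (u16)-2;  /* 65534, what the uAPI promises    */
+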
+Another place is I915_CONTEXT_PARAM_ENGINES.
+
+Therefore we need to align the type used to store engine ABI class and
+instance.
+
+v2:
+ * Update the commit message mentioning get_engines and cc stable.
+ (Chris)
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Fixes: 6d06779e8672 ("drm/i915: Load balancing across a virtual engine")
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.3+
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200116134508.25211-1-tvrtko.ursulin@linux.intel.com
+(cherry picked from commit 0b3bd0cdc329a1e2e00995cffd61aacf58c87cb4)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_busy.c | 12 ++++++------
+ drivers/gpu/drm/i915/gt/intel_engine_types.h | 4 ++--
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+@@ -9,16 +9,16 @@
+ #include "i915_gem_ioctls.h"
+ #include "i915_gem_object.h"
+
+-static __always_inline u32 __busy_read_flag(u8 id)
++static __always_inline u32 __busy_read_flag(u16 id)
+ {
+- if (id == (u8)I915_ENGINE_CLASS_INVALID)
++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
+ }
+
+-static __always_inline u32 __busy_write_id(u8 id)
++static __always_inline u32 __busy_write_id(u16 id)
+ {
+ /*
+ * The uABI guarantees an active writer is also amongst the read
+@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_
+ * last_read - hence we always set both read and write busy for
+ * last_write.
+ */
+- if (id == (u8)I915_ENGINE_CLASS_INVALID)
++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
+ }
+
+ static __always_inline unsigned int
+-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
++__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
+ {
+ const struct i915_request *rq;
+
+@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fe
+ return 0;
+
+ /* Beware type-expansion follies! */
+- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
++ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
+ }
+
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -300,8 +300,8 @@ struct intel_engine_cs {
+ u8 class;
+ u8 instance;
+
+- u8 uabi_class;
+- u8 uabi_instance;
++ u16 uabi_class;
++ u16 uabi_instance;
+
+ u32 context_size;
+ u32 mmio_base;
--- /dev/null
+From bdefca2d8dc0f80bbe49e08bf52a717146490706 Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Wed, 15 Jan 2020 20:15:54 -0600
+Subject: drm/panfrost: Add the panfrost_gem_mapping concept
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit bdefca2d8dc0f80bbe49e08bf52a717146490706 upstream.
+
+With the introduction of per-FD address spaces, the same BO can be
+mapped in different address spaces if the BO is globally visible
+(GEM_FLINK) and opened in different contexts, or if the dmabuf is
+self-imported. The current implementation does not take this case into
+account, and attaches the mapping directly to the panfrost_gem_object.
+
+Let's create a panfrost_gem_mapping struct and allow multiple mappings
+per BO.
+
+The mappings are refcounted, which helps solve another problem where
+mappings were torn down (GEM handle closed by userspace) while GPU
+jobs accessing those BOs were still in flight. Jobs now keep a
+reference on the mappings they use.
+
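+A sketch of how the new helpers are used on the submit path
+(illustrative, condensed from the changes below):
+
+  mapping = panfrost_gem_mapping_get(bo, priv); /* takes a kref     */
+  if (!mapping)
+          return -EINVAL;       /* BO not mapped in this FD's AS    */
+  job->mappings[i] = mapping;                   /* held for the job */
+  ...
+  panfrost_gem_mapping_put(mapping);            /* on job cleanup   */
+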
+v2 (robh):
+- Minor review comment clean-ups from Steven
+- Use list_is_singular helper
+- Just WARN if we add a mapping when madvise state is not WILLNEED.
+ With that, drop the use of object_name_lock.
+
+v3 (robh):
+- Revert returning list iterator in panfrost_gem_mapping_get()
+
+Fixes: a5efb4c9a562 ("drm/panfrost: Restructure the GEM object creation")
+Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200116021554.15090-1-robh@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/panfrost/panfrost_drv.c | 91 +++++++++++++++-
+ drivers/gpu/drm/panfrost/panfrost_gem.c | 124 ++++++++++++++++++++---
+ drivers/gpu/drm/panfrost/panfrost_gem.h | 41 ++++++-
+ drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 3
+ drivers/gpu/drm/panfrost/panfrost_job.c | 13 ++
+ drivers/gpu/drm/panfrost/panfrost_job.h | 1
+ drivers/gpu/drm/panfrost/panfrost_mmu.c | 61 ++++++-----
+ drivers/gpu/drm/panfrost/panfrost_mmu.h | 6 -
+ drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 34 ++++--
+ 9 files changed, 300 insertions(+), 74 deletions(-)
+
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(stru
+ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ struct drm_file *file)
+ {
++ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_gem_object *bo;
+ struct drm_panfrost_create_bo *args = data;
++ struct panfrost_gem_mapping *mapping;
+
+ if (!args->size || args->pad ||
+ (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
+@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(stru
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+- args->offset = bo->node.start << PAGE_SHIFT;
++ mapping = panfrost_gem_mapping_get(bo, priv);
++ if (!mapping) {
++ drm_gem_object_put_unlocked(&bo->base.base);
++ return -EINVAL;
++ }
++
++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
++ panfrost_gem_mapping_put(mapping);
+
+ return 0;
+ }
+@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *d
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+ {
++ struct panfrost_file_priv *priv = file_priv->driver_priv;
++ struct panfrost_gem_object *bo;
++ unsigned int i;
++ int ret;
++
+ job->bo_count = args->bo_handle_count;
+
+ if (!job->bo_count)
+@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *d
+ if (!job->implicit_fences)
+ return -ENOMEM;
+
+- return drm_gem_objects_lookup(file_priv,
+- (void __user *)(uintptr_t)args->bo_handles,
+- job->bo_count, &job->bos);
++ ret = drm_gem_objects_lookup(file_priv,
++ (void __user *)(uintptr_t)args->bo_handles,
++ job->bo_count, &job->bos);
++ if (ret)
++ return ret;
++
++ job->mappings = kvmalloc_array(job->bo_count,
++ sizeof(struct panfrost_gem_mapping *),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!job->mappings)
++ return -ENOMEM;
++
++ for (i = 0; i < job->bo_count; i++) {
++ struct panfrost_gem_mapping *mapping;
++
++ bo = to_panfrost_bo(job->bos[i]);
++ mapping = panfrost_gem_mapping_get(bo, priv);
++ if (!mapping) {
++ ret = -EINVAL;
++ break;
++ }
++
++ job->mappings[i] = mapping;
++ }
++
++ return ret;
+ }
+
+ /**
+@@ -320,7 +357,9 @@ out:
+ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
++ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct drm_panfrost_get_bo_offset *args = data;
++ struct panfrost_gem_mapping *mapping;
+ struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+
+@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(
+ }
+ bo = to_panfrost_bo(gem_obj);
+
+- args->offset = bo->node.start << PAGE_SHIFT;
+-
++ mapping = panfrost_gem_mapping_get(bo, priv);
+ drm_gem_object_put_unlocked(gem_obj);
++
++ if (!mapping)
++ return -EINVAL;
++
++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
++ panfrost_gem_mapping_put(mapping);
+ return 0;
+ }
+
+ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
++ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct drm_panfrost_madvise *args = data;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_gem_object *gem_obj;
++ struct panfrost_gem_object *bo;
++ int ret = 0;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct
+ return -ENOENT;
+ }
+
++ bo = to_panfrost_bo(gem_obj);
++
+ mutex_lock(&pfdev->shrinker_lock);
++ mutex_lock(&bo->mappings.lock);
++ if (args->madv == PANFROST_MADV_DONTNEED) {
++ struct panfrost_gem_mapping *first;
++
++ first = list_first_entry(&bo->mappings.list,
++ struct panfrost_gem_mapping,
++ node);
++
++ /*
++ * If we want to mark the BO purgeable, there must be only one
++ * user: the caller FD.
++ * We could do something smarter and mark the BO purgeable only
++ * when all its users have marked it purgeable, but globally
++ * visible/shared BOs are likely to never be marked purgeable
++ * anyway, so let's not bother.
++ */
++ if (!list_is_singular(&bo->mappings.list) ||
++ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
++ ret = -EINVAL;
++ goto out_unlock_mappings;
++ }
++ }
++
+ args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
+ if (args->retained) {
+- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+-
+ if (args->madv == PANFROST_MADV_DONTNEED)
+ list_add_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
+ else if (args->madv == PANFROST_MADV_WILLNEED)
+ list_del_init(&bo->base.madv_list);
+ }
++
++out_unlock_mappings:
++ mutex_unlock(&bo->mappings.lock);
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ drm_gem_object_put_unlocked(gem_obj);
+- return 0;
++ return ret;
+ }
+
+ int panfrost_unstable_ioctl_check(void)
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(str
+ list_del_init(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
+
++ /*
++ * If we still have mappings attached to the BO, there's a problem in
++ * our refcounting.
++ */
++ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
++
+ if (bo->sgts) {
+ int i;
+ int n_sgt = bo->base.base.size / SZ_2M;
+@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(str
+ drm_gem_shmem_free_object(obj);
+ }
+
++struct panfrost_gem_mapping *
++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
++ struct panfrost_file_priv *priv)
++{
++ struct panfrost_gem_mapping *iter, *mapping = NULL;
++
++ mutex_lock(&bo->mappings.lock);
++ list_for_each_entry(iter, &bo->mappings.list, node) {
++ if (iter->mmu == &priv->mmu) {
++ kref_get(&iter->refcount);
++ mapping = iter;
++ break;
++ }
++ }
++ mutex_unlock(&bo->mappings.lock);
++
++ return mapping;
++}
++
++static void
++panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
++{
++ struct panfrost_file_priv *priv;
++
++ if (mapping->active)
++ panfrost_mmu_unmap(mapping);
++
++ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
++ spin_lock(&priv->mm_lock);
++ if (drm_mm_node_allocated(&mapping->mmnode))
++ drm_mm_remove_node(&mapping->mmnode);
++ spin_unlock(&priv->mm_lock);
++}
++
++static void panfrost_gem_mapping_release(struct kref *kref)
++{
++ struct panfrost_gem_mapping *mapping;
++
++ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
++
++ panfrost_gem_teardown_mapping(mapping);
++ drm_gem_object_put_unlocked(&mapping->obj->base.base);
++ kfree(mapping);
++}
++
++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
++{
++ if (!mapping)
++ return;
++
++ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
++}
++
++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
++{
++ struct panfrost_gem_mapping *mapping;
++
++ mutex_lock(&bo->mappings.lock);
++ list_for_each_entry(mapping, &bo->mappings.list, node)
++ panfrost_gem_teardown_mapping(mapping);
++ mutex_unlock(&bo->mappings.lock);
++}
++
+ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+ {
+ int ret;
+@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_obj
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
++ struct panfrost_gem_mapping *mapping;
++
++ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
++ if (!mapping)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&mapping->node);
++ kref_init(&mapping->refcount);
++ drm_gem_object_get(obj);
++ mapping->obj = bo;
+
+ /*
+ * Executable buffers cannot cross a 16MB boundary as the program
+@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_obj
+ else
+ align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+
+- bo->mmu = &priv->mmu;
++ mapping->mmu = &priv->mmu;
+ spin_lock(&priv->mm_lock);
+- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
++ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
+ size >> PAGE_SHIFT, align, color, 0);
+ spin_unlock(&priv->mm_lock);
+ if (ret)
+- return ret;
++ goto err;
+
+ if (!bo->is_heap) {
+- ret = panfrost_mmu_map(bo);
+- if (ret) {
+- spin_lock(&priv->mm_lock);
+- drm_mm_remove_node(&bo->node);
+- spin_unlock(&priv->mm_lock);
+- }
++ ret = panfrost_mmu_map(mapping);
++ if (ret)
++ goto err;
+ }
++
++ mutex_lock(&bo->mappings.lock);
++ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
++ list_add_tail(&mapping->node, &bo->mappings.list);
++ mutex_unlock(&bo->mappings.lock);
++
++err:
++ if (ret)
++ panfrost_gem_mapping_put(mapping);
+ return ret;
+ }
+
+ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+ {
+- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
++ struct panfrost_gem_mapping *mapping = NULL, *iter;
+
+- if (bo->is_mapped)
+- panfrost_mmu_unmap(bo);
++ mutex_lock(&bo->mappings.lock);
++ list_for_each_entry(iter, &bo->mappings.list, node) {
++ if (iter->mmu == &priv->mmu) {
++ mapping = iter;
++ list_del(&iter->node);
++ break;
++ }
++ }
++ mutex_unlock(&bo->mappings.lock);
+
+- spin_lock(&priv->mm_lock);
+- if (drm_mm_node_allocated(&bo->node))
+- drm_mm_remove_node(&bo->node);
+- spin_unlock(&priv->mm_lock);
++ panfrost_gem_mapping_put(mapping);
+ }
+
+ static int panfrost_gem_pin(struct drm_gem_object *obj)
+@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_crea
+ if (!obj)
+ return NULL;
+
++ INIT_LIST_HEAD(&obj->mappings.list);
++ mutex_init(&obj->mappings.lock);
+ obj->base.base.funcs = &panfrost_gem_funcs;
+
+ return &obj->base.base;
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
+@@ -13,23 +13,46 @@ struct panfrost_gem_object {
+ struct drm_gem_shmem_object base;
+ struct sg_table *sgts;
+
+- struct panfrost_mmu *mmu;
+- struct drm_mm_node node;
+- bool is_mapped :1;
++ /*
++ * Use a list for now. If searching a mapping ever becomes the
++ * bottleneck, we should consider using an RB-tree, or even better,
++ * let the core store drm_gem_object_mapping entries (where we
++ * could place driver specific data) instead of drm_gem_object ones
++ * in its drm_file->object_idr table.
++ *
++ * struct drm_gem_object_mapping {
++ * struct drm_gem_object *obj;
++ * void *driver_priv;
++ * };
++ */
++ struct {
++ struct list_head list;
++ struct mutex lock;
++ } mappings;
++
+ bool noexec :1;
+ bool is_heap :1;
+ };
+
++struct panfrost_gem_mapping {
++ struct list_head node;
++ struct kref refcount;
++ struct panfrost_gem_object *obj;
++ struct drm_mm_node mmnode;
++ struct panfrost_mmu *mmu;
++ bool active :1;
++};
++
+ static inline
+ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
+ {
+ return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
+ }
+
+-static inline
+-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
++static inline struct panfrost_gem_mapping *
++drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
+ {
+- return container_of(node, struct panfrost_gem_object, node);
++ return container_of(node, struct panfrost_gem_mapping, mmnode);
+ }
+
+ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
+@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_obj
+ void panfrost_gem_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+
++struct panfrost_gem_mapping *
++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
++ struct panfrost_file_priv *priv);
++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
++
+ void panfrost_gem_shrinker_init(struct drm_device *dev);
+ void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
+--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrin
+ static bool panfrost_gem_purge(struct drm_gem_object *obj)
+ {
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
+ if (!mutex_trylock(&shmem->pages_lock))
+ return false;
+
+- panfrost_mmu_unmap(to_panfrost_bo(obj));
++ panfrost_gem_teardown_mappings(bo);
+ drm_gem_shmem_purge_locked(obj);
+
+ mutex_unlock(&shmem->pages_lock);
+--- a/drivers/gpu/drm/panfrost/panfrost_job.c
++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
+@@ -269,9 +269,20 @@ static void panfrost_job_cleanup(struct
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->render_done_fence);
+
+- if (job->bos) {
++ if (job->mappings) {
+ for (i = 0; i < job->bo_count; i++)
++ panfrost_gem_mapping_put(job->mappings[i]);
++ kvfree(job->mappings);
++ }
++
++ if (job->bos) {
++ struct panfrost_gem_object *bo;
++
++ for (i = 0; i < job->bo_count; i++) {
++ bo = to_panfrost_bo(job->bos[i]);
+ drm_gem_object_put_unlocked(job->bos[i]);
++ }
++
+ kvfree(job->bos);
+ }
+
+--- a/drivers/gpu/drm/panfrost/panfrost_job.h
++++ b/drivers/gpu/drm/panfrost/panfrost_job.h
+@@ -32,6 +32,7 @@ struct panfrost_job {
+
+ /* Exclusive fences we have taken from the BOs to wait for */
+ struct dma_fence **implicit_fences;
++ struct panfrost_gem_mapping **mappings;
+ struct drm_gem_object **bos;
+ u32 bo_count;
+
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_de
+ return 0;
+ }
+
+-int panfrost_mmu_map(struct panfrost_gem_object *bo)
++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
+ {
++ struct panfrost_gem_object *bo = mapping->obj;
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct sg_table *sgt;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+- if (WARN_ON(bo->is_mapped))
++ if (WARN_ON(mapping->active))
+ return 0;
+
+ if (bo->noexec)
+@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
+- bo->is_mapped = true;
++ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
++ prot, sgt);
++ mapping->active = true;
+
+ return 0;
+ }
+
+-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
+ {
++ struct panfrost_gem_object *bo = mapping->obj;
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
+- u64 iova = bo->node.start << PAGE_SHIFT;
+- size_t len = bo->node.size << PAGE_SHIFT;
++ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
++ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
++ size_t len = mapping->mmnode.size << PAGE_SHIFT;
+ size_t unmapped_len = 0;
+
+- if (WARN_ON(!bo->is_mapped))
++ if (WARN_ON(!mapping->active))
+ return;
+
+- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
++ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
++ mapping->mmu->as, iova, len);
+
+ while (unmapped_len < len) {
+ size_t unmapped_page;
+@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_
+ unmapped_len += pgsize;
+ }
+
+- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
+- bo->is_mapped = false;
++ panfrost_mmu_flush_range(pfdev, mapping->mmu,
++ mapping->mmnode.start << PAGE_SHIFT, len);
++ mapping->active = false;
+ }
+
+ static void mmu_tlb_inv_context_s1(void *cookie)
+@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct pa
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+ }
+
+-static struct panfrost_gem_object *
+-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
++static struct panfrost_gem_mapping *
++addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
+ {
+- struct panfrost_gem_object *bo = NULL;
++ struct panfrost_gem_mapping *mapping = NULL;
+ struct panfrost_file_priv *priv;
+ struct drm_mm_node *node;
+ u64 offset = addr >> PAGE_SHIFT;
+@@ -418,8 +423,9 @@ found_mmu:
+ drm_mm_for_each_node(node, &priv->mm) {
+ if (offset >= node->start &&
+ offset < (node->start + node->size)) {
+- bo = drm_mm_node_to_panfrost_bo(node);
+- drm_gem_object_get(&bo->base.base);
++ mapping = drm_mm_node_to_panfrost_mapping(node);
++
++ kref_get(&mapping->refcount);
+ break;
+ }
+ }
+@@ -427,7 +433,7 @@ found_mmu:
+ spin_unlock(&priv->mm_lock);
+ out:
+ spin_unlock(&pfdev->as_lock);
+- return bo;
++ return mapping;
+ }
+
+ #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(s
+ u64 addr)
+ {
+ int ret, i;
++ struct panfrost_gem_mapping *bomapping;
+ struct panfrost_gem_object *bo;
+ struct address_space *mapping;
+ pgoff_t page_offset;
+ struct sg_table *sgt;
+ struct page **pages;
+
+- bo = addr_to_drm_mm_node(pfdev, as, addr);
+- if (!bo)
++ bomapping = addr_to_mapping(pfdev, as, addr);
++ if (!bomapping)
+ return -ENOENT;
+
++ bo = bomapping->obj;
+ if (!bo->is_heap) {
+ dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+- bo->node.start << PAGE_SHIFT);
++ bomapping->mmnode.start << PAGE_SHIFT);
+ ret = -EINVAL;
+ goto err_bo;
+ }
+- WARN_ON(bo->mmu->as != as);
++ WARN_ON(bomapping->mmu->as != as);
+
+ /* Assume 2MB alignment and size multiple */
+ addr &= ~((u64)SZ_2M - 1);
+ page_offset = addr >> PAGE_SHIFT;
+- page_offset -= bo->node.start;
++ page_offset -= bomapping->mmnode.start;
+
+ mutex_lock(&bo->base.pages_lock);
+
+@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(s
+ goto err_map;
+ }
+
+- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
++ mmu_map_sg(pfdev, bomapping->mmu, addr,
++ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+- bo->is_mapped = true;
++ bomapping->active = true;
+
+ dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+
+- drm_gem_object_put_unlocked(&bo->base.base);
++ panfrost_gem_mapping_put(bomapping);
+
+ return 0;
+
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
+@@ -4,12 +4,12 @@
+ #ifndef __PANFROST_MMU_H__
+ #define __PANFROST_MMU_H__
+
+-struct panfrost_gem_object;
++struct panfrost_gem_mapping;
+ struct panfrost_file_priv;
+ struct panfrost_mmu;
+
+-int panfrost_mmu_map(struct panfrost_gem_object *bo);
+-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
+
+ int panfrost_mmu_init(struct panfrost_device *pfdev);
+ void panfrost_mmu_fini(struct panfrost_device *pfdev);
+--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+@@ -25,7 +25,7 @@
+ #define V4_SHADERS_PER_COREGROUP 4
+
+ struct panfrost_perfcnt {
+- struct panfrost_gem_object *bo;
++ struct panfrost_gem_mapping *mapping;
+ size_t bosize;
+ void *buf;
+ struct panfrost_file_priv *user;
+@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(
+ int ret;
+
+ reinit_completion(&pfdev->perfcnt->dump_comp);
+- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
++ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
+ gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
+ gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
+ gpu_write(pfdev, GPU_INT_CLEAR,
+@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locke
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+- perfcnt->bo = to_panfrost_bo(&bo->base);
+-
+ /* Map the perfcnt buf in the address space attached to file_priv. */
+- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
++ ret = panfrost_gem_open(&bo->base, file_priv);
+ if (ret)
+ goto err_put_bo;
+
++ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
++ user);
++ if (!perfcnt->mapping) {
++ ret = -EINVAL;
++ goto err_close_bo;
++ }
++
+ perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
+ if (IS_ERR(perfcnt->buf)) {
+ ret = PTR_ERR(perfcnt->buf);
+- goto err_close_bo;
++ goto err_put_mapping;
+ }
+
+ /*
+@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locke
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
+ gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+
++ /* The BO ref is retained by the mapping. */
++ drm_gem_object_put_unlocked(&bo->base);
++
+ return 0;
+
+ err_vunmap:
+- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
++ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
++err_put_mapping:
++ panfrost_gem_mapping_put(perfcnt->mapping);
+ err_close_bo:
+- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
++ panfrost_gem_close(&bo->base, file_priv);
+ err_put_bo:
+ drm_gem_object_put_unlocked(&bo->base);
+ return ret;
+@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_lock
+ GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
+
+ perfcnt->user = NULL;
+- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
++ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
+ perfcnt->buf = NULL;
+- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
+- perfcnt->bo = NULL;
++ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
++ panfrost_gem_mapping_put(perfcnt->mapping);
++ perfcnt->mapping = NULL;
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
--- /dev/null
+From cf3ca1877574a306c0207cbf7fdf25419d9229df Mon Sep 17 00:00:00 2001
+From: Luuk Paulussen <luuk.paulussen@alliedtelesis.co.nz>
+Date: Fri, 6 Dec 2019 12:16:59 +1300
+Subject: hwmon: (adt7475) Make volt2reg return same reg as reg2volt input
+
+From: Luuk Paulussen <luuk.paulussen@alliedtelesis.co.nz>
+
+commit cf3ca1877574a306c0207cbf7fdf25419d9229df upstream.
+
+reg2volt returns the voltage that matches a given register value.
+Converting this back the other way with volt2reg didn't return the same
+register value because it used truncation instead of rounding.
+
+This meant that values read from sysfs could not be written back to sysfs
+to set back the same register value.
+
+With this change, volt2reg will return the same value for every voltage
+previously returned by reg2volt (for the set of possible input values).
+
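+A worked example of the failure in the bypass case (assuming reg2volt
+rounds reg * 2250 / 1024, the inverse of the conversion below):
+
+  reg = 12: reg2volt -> (12 * 2250) / 1024 = 26.37 -> 26 mV
+  old volt2reg(26): (26 * 1024) / 2250 = 11.83 -> truncated to 11,
+                    masked by (0xff << 2) down to 8 (!= 12)
+  new volt2reg(26): DIV_ROUND_CLOSEST(26 * 1024, 2250) = 12
+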
+Signed-off-by: Luuk Paulussen <luuk.paulussen@alliedtelesis.co.nz>
+Link: https://lore.kernel.org/r/20191205231659.1301-1-luuk.paulussen@alliedtelesis.co.nz
+cc: stable@vger.kernel.org
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/adt7475.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel,
+ long reg;
+
+ if (bypass_attn & (1 << channel))
+- reg = (volt * 1024) / 2250;
++ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
+ else
+- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
++ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
++ (r[0] + r[1]) * 2250);
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
+ }
+
--- /dev/null
+From 3bf8bdcf3bada771eb12b57f2a30caee69e8ab8d Mon Sep 17 00:00:00 2001
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Thu, 16 Jan 2020 10:44:17 -0800
+Subject: hwmon: (core) Do not use device managed functions for memory allocations
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit 3bf8bdcf3bada771eb12b57f2a30caee69e8ab8d upstream.
+
+The hwmon core uses device managed functions, tied to the hwmon parent
+device, for various internal memory allocations. This is problematic
+since the hwmon device's lifetime does not necessarily match its parent
+device's lifetime. If there is a mismatch, memory leaks will accumulate
+until the parent device is released.
+
+Fix the problem by managing all memory allocations internally. The only
+exception is memory allocation for thermal device registration, which
+can be tied to the hwmon device, along with thermal device registration
+itself.
+
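+A sketch of the lifetime mismatch being fixed (illustrative only):
+
+  hwdev = hwmon_device_register_with_info(parent, ...);
+  ...
+  hwmon_device_unregister(hwdev);  /* hwmon device is gone, but any */
+                                   /* devm allocation tied to       */
+                                   /* 'parent' stays around until   */
+                                   /* 'parent' itself is released   */
+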
+Fixes: d560168b5d0f ("hwmon: (core) New hwmon registration API")
+Cc: stable@vger.kernel.org # v4.14.x: 47c332deb8e8: hwmon: Deal with errors from the thermal subsystem
+Cc: stable@vger.kernel.org # v4.14.x: 74e3512731bd: hwmon: (core) Fix double-free in __hwmon_device_register()
+Cc: stable@vger.kernel.org # v4.9.x: 3a412d5e4a1c: hwmon: (core) Simplify sysfs attribute name allocation
+Cc: stable@vger.kernel.org # v4.9.x: 47c332deb8e8: hwmon: Deal with errors from the thermal subsystem
+Cc: stable@vger.kernel.org # v4.9.x: 74e3512731bd: hwmon: (core) Fix double-free in __hwmon_device_register()
+Cc: stable@vger.kernel.org # v4.9+
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/hwmon.c | 68 ++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 41 insertions(+), 27 deletions(-)
+
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
+
+ #define to_hwmon_attr(d) \
+ container_of(d, struct hwmon_device_attribute, dev_attr)
++#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
+
+ /*
+ * Thermal zone information
+@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
+ * also provides the sensor index.
+ */
+ struct hwmon_thermal_data {
+- struct hwmon_device *hwdev; /* Reference to hwmon device */
++ struct device *dev; /* Reference to hwmon device */
+ int index; /* sensor index */
+ };
+
+@@ -95,9 +96,27 @@ static const struct attribute_group *hwm
+ NULL
+ };
+
++static void hwmon_free_attrs(struct attribute **attrs)
++{
++ int i;
++
++ for (i = 0; attrs[i]; i++) {
++ struct device_attribute *dattr = to_dev_attr(attrs[i]);
++ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
++
++ kfree(hattr);
++ }
++ kfree(attrs);
++}
++
+ static void hwmon_dev_release(struct device *dev)
+ {
+- kfree(to_hwmon_device(dev));
++ struct hwmon_device *hwdev = to_hwmon_device(dev);
++
++ if (hwdev->group.attrs)
++ hwmon_free_attrs(hwdev->group.attrs);
++ kfree(hwdev->groups);
++ kfree(hwdev);
+ }
+
+ static struct class hwmon_class = {
+@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
+ static int hwmon_thermal_get_temp(void *data, int *temp)
+ {
+ struct hwmon_thermal_data *tdata = data;
+- struct hwmon_device *hwdev = tdata->hwdev;
++ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
+ int ret;
+ long t;
+
+- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
++ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
+ tdata->index, &t);
+ if (ret < 0)
+ return ret;
+@@ -137,8 +156,7 @@ static const struct thermal_zone_of_devi
+ .get_temp = hwmon_thermal_get_temp,
+ };
+
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
+@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(stru
+ if (!tdata)
+ return -ENOMEM;
+
+- tdata->hwdev = hwdev;
++ tdata->dev = dev;
+ tdata->index = index;
+
+- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
++ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(stru
+ return 0;
+ }
+ #else
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ return 0;
+ }
+@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_se
+ (type == hwmon_fan && attr == hwmon_fan_label);
+ }
+
+-static struct attribute *hwmon_genattr(struct device *dev,
+- const void *drvdata,
++static struct attribute *hwmon_genattr(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr,
+ int index,
+@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(s
+ if ((mode & 0222) && !ops->write)
+ return ERR_PTR(-EINVAL);
+
+- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
++ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
+@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const
+ return n;
+ }
+
+-static int hwmon_genattrs(struct device *dev,
+- const void *drvdata,
++static int hwmon_genattrs(const void *drvdata,
+ struct attribute **attrs,
+ const struct hwmon_ops *ops,
+ const struct hwmon_channel_info *info)
+@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device
+ attr_mask &= ~BIT(attr);
+ if (attr >= template_size)
+ return -EINVAL;
+- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
++ a = hwmon_genattr(drvdata, info->type, attr, i,
+ templates[attr], ops);
+ if (IS_ERR(a)) {
+ if (PTR_ERR(a) != -ENOENT)
+@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device
+ }
+
+ static struct attribute **
+-__hwmon_create_attrs(struct device *dev, const void *drvdata,
+- const struct hwmon_chip_info *chip)
++__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
+ {
+ int ret, i, aindex = 0, nattrs = 0;
+ struct attribute **attrs;
+@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev,
+ if (nattrs == 0)
+ return ERR_PTR(-EINVAL);
+
+- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
++ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; chip->info[i]; i++) {
+- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
++ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
+ chip->info[i]);
+- if (ret < 0)
++ if (ret < 0) {
++ hwmon_free_attrs(attrs);
+ return ERR_PTR(ret);
++ }
+ aindex += ret;
+ }
+
+@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *d
+ for (i = 0; groups[i]; i++)
+ ngroups++;
+
+- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
+- GFP_KERNEL);
++ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+
+- attrs = __hwmon_create_attrs(dev, drvdata, chip);
++ attrs = __hwmon_create_attrs(drvdata, chip);
+ if (IS_ERR(attrs)) {
+ err = PTR_ERR(attrs);
+ goto free_hwmon;
+@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *d
+ hwmon_temp_input, j))
+ continue;
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+- err = hwmon_thermal_add_sensor(dev,
+- hwdev, j);
++ err = hwmon_thermal_add_sensor(hdev, j);
+ if (err) {
+ device_unregister(hdev);
+ /*
+@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *d
+ return hdev;
+
+ free_hwmon:
+- kfree(hwdev);
++ hwmon_dev_release(hdev);
+ ida_remove:
+ ida_simple_remove(&hwmon_ida, id);
+ return ERR_PTR(err);
--- /dev/null
+From ba9a103f40fc4a3ec7558ec9b0b97d4f92034249 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 13 Jan 2020 10:38:57 -0800
+Subject: Input: keyspan-remote - fix control-message timeouts
+
+From: Johan Hovold <johan@kernel.org>
+
+commit ba9a103f40fc4a3ec7558ec9b0b97d4f92034249 upstream.
+
+The driver was issuing synchronous uninterruptible control requests
+without using a timeout. This could lead to the driver hanging on probe
+due to a malfunctioning (or malicious) device until the device is
+physically disconnected. While sleeping in probe the driver prevents
+other devices connected to the same hub from being added to (or removed
+from) the bus.
+
+The USB upper limit of five seconds per request should be more than
+enough.
+
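+For reference, the timeout is the last argument of usb_control_msg()
+(in milliseconds; 0 means wait indefinitely), so each call becomes:
+
+  retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+                           0x11, 0x40, 0x5601, 0x0, NULL, 0,
+                           USB_CTRL_SET_TIMEOUT); /* 5000 ms */
+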
+Fixes: 99f83c9c9ac9 ("[PATCH] USB: add driver for Keyspan Digital Remote")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable <stable@vger.kernel.org> # 2.6.13
+Link: https://lore.kernel.org/r/20200113171715.30621-1-johan@kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/misc/keyspan_remote.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/input/misc/keyspan_remote.c
++++ b/drivers/input/misc/keyspan_remote.c
+@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_devi
+ int retval = 0;
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
++ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
+ __func__, retval);
+@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_devi
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x44, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
+ __func__, retval);
+@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_devi
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x22, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
+ __func__, retval);
--- /dev/null
+From bf708cfb2f4811d1948a88c41ab96587e84ad344 Mon Sep 17 00:00:00 2001
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+Date: Tue, 21 Jan 2020 17:34:26 -0700
+Subject: iommu/vt-d: Call __dmar_remove_one_dev_info with valid pointer
+
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+
+commit bf708cfb2f4811d1948a88c41ab96587e84ad344 upstream.
+
+It is possible for archdata.iommu to be set to
+DEFER_DEVICE_DOMAIN_INFO or DUMMY_DEVICE_DOMAIN_INFO, so check for
+those values before calling __dmar_remove_one_dev_info(). Without such
+a check, the result can be a NULL pointer dereference. This has been
+seen while booting a kdump kernel on an HP DL380 Gen9.
+
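+The filtered values are sentinel pointers rather than real allocations;
+they are defined in intel-iommu.c along these lines (sketch):
+
+  #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+  #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
+
+so passing either of them to __dmar_remove_one_dev_info() dereferences
+a bogus pointer.
+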
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: stable@vger.kernel.org # 5.3+
+Cc: linux-kernel@vger.kernel.org
+Fixes: ae23bfb68f28 ("iommu/vt-d: Detach domain before using a private one")
+Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -5132,7 +5132,8 @@ static void dmar_remove_one_dev_info(str
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+- if (info)
++ if (info && info != DEFER_DEVICE_DOMAIN_INFO
++ && info != DUMMY_DEVICE_DOMAIN_INFO)
+ __dmar_remove_one_dev_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
--- /dev/null
+From d829229e35f302fd49c052b5c5906c90ecf9911d Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Tue, 3 Dec 2019 10:08:49 +0200
+Subject: iwlwifi: mvm: don't send the IWL_MVM_RXQ_NSSN_SYNC notif to Rx queues
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit d829229e35f302fd49c052b5c5906c90ecf9911d upstream.
+
+The purpose of this was to keep all the queues updated with
+the Rx sequence numbers, because of unlikely yet possible
+situations where queues can't determine whether a specific
+packet needs to be dropped or not.
+
+Unfortunately, it was reported that this caused issues in
+our DMA engine. We don't fully understand how this is related,
+but it is currently being debugged. For now, just don't send
+this notification to the Rx queues. This de facto reverts my
+commit 3c514bf831ac12356b695ff054bef641b9e99593:
+
+iwlwifi: mvm: add a loose synchronization of the NSSN across Rx queues
+
+This issue was reported here:
+https://bugzilla.kernel.org/show_bug.cgi?id=204873
+https://bugzilla.kernel.org/show_bug.cgi?id=205001
+and others maybe.
+
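+Note the gating style: IWL_MVM_USE_NSSN_SYNC is a compile-time constant
+set to 0, so the disabled branch is still type-checked but the compiler
+eliminates it entirely:
+
+  if (IWL_MVM_USE_NSSN_SYNC) {
+          /* dead code, dropped at compile time */
+  }
+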
+Fixes: 3c514bf831ac ("iwlwifi: mvm: add a loose synchronization of the NSSN across Rx queues")
+CC: <stable@vger.kernel.org> # 5.3+
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 +
+ drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 17 ++++++++++-------
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+@@ -154,5 +154,6 @@
+ #define IWL_MVM_D3_DEBUG false
+ #define IWL_MVM_USE_TWT false
+ #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
++#define IWL_MVM_USE_NSSN_SYNC 0
+
+ #endif /* __MVM_CONSTANTS_H */
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1,
+
+ static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
+ {
+- struct iwl_mvm_rss_sync_notif notif = {
+- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+- .metadata.sync = 0,
+- .nssn_sync.baid = baid,
+- .nssn_sync.nssn = nssn,
+- };
++ if (IWL_MVM_USE_NSSN_SYNC) {
++ struct iwl_mvm_rss_sync_notif notif = {
++ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
++ .metadata.sync = 0,
++ .nssn_sync.baid = baid,
++ .nssn_sync.nssn = nssn,
++ };
+
+- iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif));
++ iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if,
++ sizeof(notif));
++ }
+ }
+
+ #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
--- /dev/null
+From 2a187d03352086e300daa2044051db00044cd171 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= <mirq-linux@rere.qmqm.pl>
+Date: Wed, 15 Jan 2020 10:54:35 +0100
+Subject: mmc: sdhci: fix minimum clock rate for v3 controller
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+
+commit 2a187d03352086e300daa2044051db00044cd171 upstream.
+
+For SDHCIv3+ with programmable clock mode, the minimum clock frequency
+is still base clock / max(divider). The minimum programmable clock
+frequency is always greater than the minimum divided clock frequency.
+Without this patch, SDHCI uses an out-of-spec initial frequency when the
+multiplier is big enough:
+
+mmc1: mmc_rescan_try_freq: trying to init card at 468750 Hz
+[for 480 MHz source clock divided by 1024]
+
+The code in sdhci_calc_clk() already chooses a correct SDCLK clock mode.
+
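+Worked numbers (SDHCI_MAX_DIV_SPEC_300 is 2046; the 468750 Hz figure
+above corresponds to a 480 MHz multiplied clock):
+
+  programmable minimum: (max_clk * clk_mul) / 1024
+                        = 480 MHz / 1024 ~= 468.75 kHz, above the
+                        400 kHz identification-mode limit
+  divided minimum:      max_clk / SDHCI_MAX_DIV_SPEC_300
+                        e.g. 48 MHz / 2046 ~= 23.5 kHz (hypothetical
+                        48 MHz base clock)
+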
+Fixes: c3ed3877625f ("mmc: sdhci: add support for programmable clock mode")
+Cc: <stable@vger.kernel.org> # 4f6aa3264af4: mmc: tegra: Only advertise UHS modes if IO regulator is present
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/ffb489519a446caffe7a0a05c4b9372bd52397bb.1579082031.git.mirq-linux@rere.qmqm.pl
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3902,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *
+ if (host->ops->get_min_clock)
+ mmc->f_min = host->ops->get_min_clock(host);
+ else if (host->version >= SDHCI_SPEC_300) {
+- if (host->clk_mul) {
+- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
++ if (host->clk_mul)
+ max_clk = host->max_clk * host->clk_mul;
+- } else
+- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
++ /*
++ * Divided Clock Mode minimum clock rate is always less than
++ * Programmable Clock Mode minimum clock rate.
++ */
++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ } else
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+
--- /dev/null
+From 4d627c88546a697b07565dbb70d2f9f46a5ee76f Mon Sep 17 00:00:00 2001
+From: Faiz Abbas <faiz_abbas@ti.com>
+Date: Wed, 8 Jan 2020 20:02:59 +0530
+Subject: mmc: sdhci_am654: Remove Inverted Write Protect flag
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+commit 4d627c88546a697b07565dbb70d2f9f46a5ee76f upstream.
+
+The MMC/SD controllers on am65x and j721e don't in fact detect the write
+protect line as inverted. No issues were detected because of this
+because the sdwp line is not connected on any of the evms. Fix this by
+removing the flag.
+
+Fixes: 1accbced1c32 ("mmc: sdhci_am654: Add Support for 4 bit IP on J721E")
+Cc: stable@vger.kernel.org
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200108143301.1929-2-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci_am654.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -249,8 +249,7 @@ static struct sdhci_ops sdhci_am654_ops
+
+ static const struct sdhci_pltfm_data sdhci_am654_pdata = {
+ .ops = &sdhci_am654_ops,
+- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+
+@@ -272,8 +271,7 @@ static struct sdhci_ops sdhci_j721e_8bit
+
+ static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
+ .ops = &sdhci_j721e_8bit_ops,
+- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+
+@@ -295,8 +293,7 @@ static struct sdhci_ops sdhci_j721e_4bit
+
+ static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
+ .ops = &sdhci_j721e_4bit_ops,
+- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+
--- /dev/null
+From de31f6ab68a3f548d88686d53514f252d78f61d5 Mon Sep 17 00:00:00 2001
+From: Faiz Abbas <faiz_abbas@ti.com>
+Date: Wed, 8 Jan 2020 20:03:00 +0530
+Subject: mmc: sdhci_am654: Reset Command and Data line after tuning
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+commit de31f6ab68a3f548d88686d53514f252d78f61d5 upstream.
+
+The tuning data is left over in the buffer after tuning. This can cause
+issues in future data commands, especially with CQHCI. Reset the command
+and data lines after tuning to continue from a clean state.
+
+Fixes: 41fd4caeb00b ("mmc: sdhci_am654: Add Initial Support for AM654 SDHCI driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200108143301.1929-3-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci_am654.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct s
+ writeb(val, host->ioaddr + reg);
+ }
+
++static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ int err = sdhci_execute_tuning(mmc, opcode);
++
++ if (err)
++ return err;
++ /*
++ * Tuning data remains in the buffer after tuning.
++ * Do a command and data reset to get rid of it
++ */
++ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++
++ return 0;
++}
++
+ static struct sdhci_ops sdhci_am654_ops = {
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+@@ -477,6 +493,8 @@ static int sdhci_am654_probe(struct plat
+ goto pm_runtime_put;
+ }
+
++ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
++
+ ret = sdhci_am654_init(host);
+ if (ret)
+ goto pm_runtime_put;
--- /dev/null
+From f571389c0b015e76f91c697c4c1700aba860d34f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= <mirq-linux@rere.qmqm.pl>
+Date: Tue, 7 Jan 2020 10:47:34 +0100
+Subject: mmc: tegra: fix SDR50 tuning override
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+
+commit f571389c0b015e76f91c697c4c1700aba860d34f upstream.
+
+Commit 7ad2ed1dfcbe inadvertently mixed up a quirk flag's name and
+broke the SDR50 tuning override. Use the correct NVQUIRK_ name.
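+
+The mixup compiles cleanly because both names are plain integer bit
+masks and only the namespace differs. A minimal sketch (the bit
+positions here are hypothetical, not the driver's actual values):
+
+  #define NVQUIRK_ENABLE_SDR50           BIT(3) /* soc_data->nvquirks bit */
+  #define SDHCI_MISC_CTRL_ENABLE_SDR50   BIT(4) /* hardware register bit */
+
+  if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)         /* intended test */
+  if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) /* wrong bit */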
+
+Fixes: 7ad2ed1dfcbe ("mmc: tegra: enable UHS-I modes")
+Cc: <stable@vger.kernel.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Reviewed-by: Thierry Reding <treding@nvidia.com>
+Tested-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
+Link: https://lore.kernel.org/r/9aff1d859935e59edd81e4939e40d6c55e0b55f6.1578390388.git.mirq-linux@rere.qmqm.pl
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-tegra.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdh
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
++ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
+ clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
+ }
+
--- /dev/null
+From 5e89cd303e3a4505752952259b9f1ba036632544 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 14 Jan 2020 17:09:28 -0600
+Subject: PCI: Mark AMD Navi14 GPU rev 0xc5 ATS as broken
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 5e89cd303e3a4505752952259b9f1ba036632544 upstream.
+
+To account for parts of the chip that are "harvested" (disabled) due to
+silicon flaws, caches on some AMD GPUs must be initialized before ATS is
+enabled.
+
+ATS is normally enabled by the IOMMU driver before the GPU driver loads, so
+this cache initialization would have to be done in a quirk, but that's too
+complex to be practical.
+
+For Navi14 (device ID 0x7340), this initialization is done by the VBIOS,
+but apparently some boards went to production with an older VBIOS that
+doesn't do it. Disable ATS for those boards.
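+
+Whether a given board carries the affected part can be checked with
+something like the following (illustrative pciutils invocation; the
+quirk below also logs "disabling ATS" when it matches):
+
+ lspci -nn -d 1002:7340     # lists the Navi14 device and its revision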
+
+Link: https://lore.kernel.org/r/20200114205523.1054271-3-alexander.deucher@amd.com
+Bug: https://gitlab.freedesktop.org/drm/amd/issues/1015
+See-also: d28ca864c493 ("PCI: Mark AMD Stoney Radeon R7 GPU ATS as broken")
+See-also: 9b44b0b09dec ("PCI: Mark AMD Stoney GPU ATS as broken")
+[bhelgaas: squash into one patch, simplify slightly, commit log]
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/quirks.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5021,18 +5021,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SE
+
+ #ifdef CONFIG_PCI_ATS
+ /*
+- * Some devices have a broken ATS implementation causing IOMMU stalls.
+- * Don't use ATS for those devices.
++ * Some devices require additional driver setup to enable ATS. Don't use
++ * ATS for those devices as ATS will be enabled before the driver has had a
++ * chance to load and configure the device.
+ */
+-static void quirk_no_ats(struct pci_dev *pdev)
++static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ {
+- pci_info(pdev, "disabling ATS (broken on this device)\n");
++ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
++ return;
++
++ pci_info(pdev, "disabling ATS\n");
+ pdev->ats_cap = 0;
+ }
+
+ /* AMD Stoney platform GPU */
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
++/* AMD Iceland dGPU */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
++/* AMD Navi14 dGPU */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ /* Freescale PCIe doesn't support MSI in RC mode */
--- /dev/null
+From 9608ea6c6613ced75b2c41703d99f44e6f8849f1 Mon Sep 17 00:00:00 2001
+From: Boyan Ding <boyan.j.ding@gmail.com>
+Date: Wed, 1 Jan 2020 12:41:20 -0800
+Subject: pinctrl: sunrisepoint: Add missing Interrupt Status register offset
+
+From: Boyan Ding <boyan.j.ding@gmail.com>
+
+commit 9608ea6c6613ced75b2c41703d99f44e6f8849f1 upstream.
+
+Commit 179e5a6114cc ("pinctrl: intel: Remove default Interrupt Status
+offset") removes the default Interrupt Status offset of the GPIO
+controllers, with earlier commits explicitly providing the previously
+default offsets. However, the is_offset value in SPTH_COMMUNITY is
+missing, preventing the related IRQs from being properly detected and
+handled.
+
+Fixes: f702e0b93cdb ("pinctrl: sunrisepoint: Provide Interrupt Status register offset")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=205745
+Cc: stable@vger.kernel.org
+Signed-off-by: Boyan Ding <boyan.j.ding@gmail.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-sunrisepoint.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
++++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+@@ -49,6 +49,7 @@
+ .padown_offset = SPT_PAD_OWN, \
+ .padcfglock_offset = SPT_PADCFGLOCK, \
+ .hostown_offset = SPT_HOSTSW_OWN, \
++ .is_offset = SPT_GPI_IS, \
+ .ie_offset = SPT_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
--- /dev/null
+From 18451f9f9e5810b8bd1245c5ae166f257e0e2b9d Mon Sep 17 00:00:00 2001
+From: Alexander Potapenko <glider@google.com>
+Date: Thu, 16 Jan 2020 12:09:34 +0100
+Subject: PM: hibernate: fix crashes with init_on_free=1
+
+From: Alexander Potapenko <glider@google.com>
+
+commit 18451f9f9e5810b8bd1245c5ae166f257e0e2b9d upstream.
+
+Upon resuming from hibernation, free pages may contain stale data from
+the kernel that initiated the resume. This breaks the invariant
+imposed by init_on_free=1 that freed pages must be zeroed.
+
+To deal with this problem, make clear_free_pages() also clear the free
+pages when init_on_free is enabled.
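+
+A reproduction sketch (assuming a machine set up for hibernation):
+
+ # boot with init_on_free=1 on the kernel command line, then:
+ echo disk > /sys/power/state
+ # after resume, free pages still hold the boot kernel's data,
+ # violating the freed-pages-are-zeroed invariant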
+
+Fixes: 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
+Reported-by: Johannes Stezenbach <js@sig21.net>
+Signed-off-by: Alexander Potapenko <glider@google.com>
+Cc: 5.3+ <stable@vger.kernel.org> # 5.3+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/power/snapshot.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
+
+ void clear_free_pages(void)
+ {
+-#ifdef CONFIG_PAGE_POISONING_ZERO
+ struct memory_bitmap *bm = free_pages_map;
+ unsigned long pfn;
+
+ if (WARN_ON(!(free_pages_map)))
+ return;
+
+- memory_bm_position_reset(bm);
+- pfn = memory_bm_next_pfn(bm);
+- while (pfn != BM_END_OF_MAP) {
+- if (pfn_valid(pfn))
+- clear_highpage(pfn_to_page(pfn));
+-
++ if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
++ memory_bm_position_reset(bm);
+ pfn = memory_bm_next_pfn(bm);
++ while (pfn != BM_END_OF_MAP) {
++ if (pfn_valid(pfn))
++ clear_highpage(pfn_to_page(pfn));
++
++ pfn = memory_bm_next_pfn(bm);
++ }
++ memory_bm_position_reset(bm);
++ pr_info("free pages cleared after restore\n");
+ }
+- memory_bm_position_reset(bm);
+- pr_info("free pages cleared after restore\n");
+-#endif /* PAGE_POISONING_ZERO */
+ }
+
+ /**
--- /dev/null
+From 5d2e5dd5849b4ef5e8ec35e812cdb732c13cd27e Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Wed, 8 Jan 2020 11:14:22 +0530
+Subject: powerpc/mm/hash: Fix sharing context ids between kernel & userspace
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 5d2e5dd5849b4ef5e8ec35e812cdb732c13cd27e upstream.
+
+Commit 0034d395f89d ("powerpc/mm/hash64: Map all the kernel regions in
+the same 0xc range") has a bug in the definition of MIN_USER_CONTEXT.
+
+The result is that the context id used for the vmemmap and the lowest
+context id handed out to userspace are the same. The context id is
+essentially the process identifier as far as the first stage of the
+MMU translation is concerned.
+
+This can result in multiple SLB entries with the same VSID (Virtual
+Segment ID), accessible to the kernel and some random userspace
+process that happens to get the overlapping id, which is not expected
+eg:
+
+ 07 c00c000008000000 40066bdea7000500 1T ESID= c00c00 VSID= 66bdea7 LLP:100
+ 12 0002000008000000 40066bdea7000d80 1T ESID= 200 VSID= 66bdea7 LLP:100
+
+Even though the user process and the kernel use the same VSID, the
+permissions in the hash page table prevent the user process from
+reading or writing to any kernel mappings.
+
+It can also lead to SLB entries with different base page size
+encodings (LLP), eg:
+
+ 05 c00c000008000000 00006bde0053b500 256M ESID=c00c00000 VSID= 6bde0053b LLP:100
+ 09 0000000008000000 00006bde0053bc80 256M ESID= 0 VSID= 6bde0053b LLP: 0
+
+Such SLB entries can result in machine checks, eg. as seen on a G5:
+
+ Oops: Machine check, sig: 7 [#1]
+ BE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=4 NUMA Power Mac
+ NIP: c00000000026f248 LR: c000000000295e58 CTR: 0000000000000000
+ REGS: c0000000erfd3d70 TRAP: 0200 Tainted: G M (5.5.0-rc1-gcc-8.2.0-00010-g228b667d8ea1)
+ MSR: 9000000000109032 <SF,HV,EE,ME,IR,DR,RI> CR: 24282048 XER: 00000000
+ DAR: c00c000000612c80 DSISR: 00000400 IRQMASK: 0
+ ...
+ NIP [c00000000026f248] .kmem_cache_free+0x58/0x140
+ LR [c000000000295e58] .putname+0x88/0xa0
+ Call Trace:
+ .putname+0xb8/0xa0
+ .filename_lookup.part.76+0xbe/0x160
+ .do_faccessat+0xe0/0x380
+ system_call+0x5c/0x68
+
+This happens with 256MB segments and 64K pages, as the duplicate VSID
+is hit with the first vmemmap segment and the first user segment, and
+older 32-bit userspace maps things in the first user segment.
+
+On other CPUs a machine check is not seen. Instead the userspace
+process can get stuck continuously faulting, with the fault never
+properly serviced, due to the kernel not understanding that there is
+already a HPTE for the address but with inaccessible permissions.
+
+On machines with 1T segments we've not seen the bug hit other than by
+deliberately exercising it. That seems to be just a matter of luck
+though, due to the typical layout of the user virtual address space
+and the ranges of vmemmap that are typically populated.
+
+To fix it we add 2 to MIN_USER_CONTEXT. This ensures the lowest
+context given to userspace doesn't overlap with the VMEMMAP context,
+or with the context for INVALID_REGION_ID.
+
+Fixes: 0034d395f89d ("powerpc/mm/hash64: Map all the kernel regions in the same 0xc range")
+Cc: stable@vger.kernel.org # v5.2+
+Reported-by: Christian Marillat <marillat@debian.org>
+Reported-by: Romain Dolbeau <romain@dolbeau.org>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+[mpe: Account for INVALID_REGION_ID, mostly rewrite change log]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200123102547.11623-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/mmu-hash.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
++++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
+ *
+ */
+ #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
++
++// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
+ #define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
+- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
++ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
++
+ /*
+ * For platforms that support on 65bit VA we limit the context bits
+ */
--- /dev/null
+From 17328f218fb760c9c6accc5b52494889243a6b98 Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.ibm.com>
+Date: Mon, 13 Jan 2020 14:01:18 +0100
+Subject: powerpc/xive: Discard ESB load value when interrupt is invalid
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Frederic Barrat <fbarrat@linux.ibm.com>
+
+commit 17328f218fb760c9c6accc5b52494889243a6b98 upstream.
+
+A load on an ESB page returning all 1's means that the underlying
+device has invalidated the access to the PQ state of the interrupt
+through mmio. It may happen, for example when querying a PHB interrupt
+while the PHB is in an error state.
+
+In that case, we should consider the interrupt to be invalid when
+checking its state in the irq_get_irqchip_state() handler.
+
+Fixes: da15c03b047d ("powerpc/xive: Implement get_irqchip_state method for XIVE to fix shutdown race")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
+[clg: wrote a commit log, introduced XIVE_ESB_INVALID ]
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200113130118.27969-1-clg@kaod.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/xive-regs.h | 1 +
+ arch/powerpc/sysdev/xive/common.c | 15 ++++++++++++---
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/include/asm/xive-regs.h
++++ b/arch/powerpc/include/asm/xive-regs.h
+@@ -39,6 +39,7 @@
+
+ #define XIVE_ESB_VAL_P 0x2
+ #define XIVE_ESB_VAL_Q 0x1
++#define XIVE_ESB_INVALID 0xFF
+
+ /*
+ * Thread Management (aka "TM") registers
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct
+ enum irqchip_irq_state which, bool *state)
+ {
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
++ u8 pq;
+
+ switch (which) {
+ case IRQCHIP_STATE_ACTIVE:
+- *state = !xd->stale_p &&
+- (xd->saved_p ||
+- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
++ pq = xive_esb_read(xd, XIVE_ESB_GET);
++
++ /*
++ * The esb value being all 1's means we couldn't get
++ * the PQ state of the interrupt through mmio. It may
++ * happen, for example when querying a PHB interrupt
++ * while the PHB is in an error state. We consider the
++ * interrupt to be inactive in that case.
++ */
++ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
++ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+ return 0;
+ default:
+ return -EINVAL;
--- /dev/null
+From 8ff771f8c8d55d95f102cf88a970e541a8bd6bcf Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Thu, 16 Jan 2020 20:12:27 -0800
+Subject: Revert "Input: synaptics-rmi4 - don't increment rmiaddr for SMBus transfers"
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+commit 8ff771f8c8d55d95f102cf88a970e541a8bd6bcf upstream.
+
+This reverts commit a284e11c371e446371675668d8c8120a27227339.
+
+This causes problems (drifting cursor) with at least the F11 function that
+reads more than 32 bytes.
+
+The real issue is in the F54 driver, and so this should be fixed there, and
+not in rmi_smbus.c.
+
+So first revert this bad commit, then fix the real problem in F54 in another
+patch.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Reported-by: Timo Kaufmann <timokau@zoho.com>
+Fixes: a284e11c371e ("Input: synaptics-rmi4 - don't increment rmiaddr for SMBus transfers")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200115124819.3191024-2-hverkuil-cisco@xs4all.nl
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_smbus.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rm
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+ exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
--- /dev/null
+From 73e08e711d9c1d79fae01daed4b0e1fee5f8a275 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 26 Jan 2020 09:53:12 -0700
+Subject: Revert "io_uring: only allow submit from owning task"
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 73e08e711d9c1d79fae01daed4b0e1fee5f8a275 upstream.
+
+This ends up being too restrictive for tasks that willingly fork and
+share the ring between forks. Andres reports that this breaks his
+postgresql work. Since we're close to 5.5 release, revert this change
+for now.
+
+Cc: stable@vger.kernel.org
+Fixes: 44d282796f81 ("io_uring: only allow submit from owning task")
+Reported-by: Andres Freund <andres@anarazel.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -3716,12 +3716,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned
+ wake_up(&ctx->sqo_wait);
+ submitted = to_submit;
+ } else if (to_submit) {
+- if (current->mm != ctx->sqo_mm ||
+- current_cred() != ctx->creds) {
+- ret = -EPERM;
+- goto out;
+- }
+-
+ to_submit = min(to_submit, ctx->sq_entries);
+
+ mutex_lock(&ctx->uring_lock);
--- /dev/null
+From 205608749e1ef394f513888091e613c5bfccbcca Mon Sep 17 00:00:00 2001
+From: Mehmet Akif Tasova <makiftasova@gmail.com>
+Date: Fri, 13 Dec 2019 23:35:10 +0300
+Subject: Revert "iwlwifi: mvm: fix scan config command size"
+
+From: Mehmet Akif Tasova <makiftasova@gmail.com>
+
+commit 205608749e1ef394f513888091e613c5bfccbcca upstream.
+
+Since v5.4-rc1 was released, iwlwifi started throwing errors when scan
+commands were sent to the firmware with certain devices (depending on
+the OTP burned in the device, which contains the list of available
+channels). For instance:
+
+iwlwifi 0000:00:14.3: FW error in SYNC CMD SCAN_CFG_CMD
+
+This bug was reported in the ArchLinux bug tracker:
+https://bugs.archlinux.org/task/64703
+
+And also in a specific case in bugzilla, when the lar_disabled option
+was set: https://bugzilla.kernel.org/show_bug.cgi?id=205193
+
+Revert the commit that introduced this error by using the number of
+channels from the OTP instead of the number of channels specified in
+the FW TLV that tells us how many channels the firmware supports.
+
+This reverts commit 06eb547c4ae4382e70d556ba213d13c95ca1801b.
+
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Mehmet Akif Tasova <makiftasova@gmail.com>
+[ Luca: reworded the commit message a bit. ]
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1220,7 +1220,7 @@ static int iwl_mvm_legacy_config_scan(st
+ cmd_size = sizeof(struct iwl_scan_config_v2);
+ else
+ cmd_size = sizeof(struct iwl_scan_config_v1);
+- cmd_size += num_channels;
++ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
+
+ cfg = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cfg)
 net-mlx5e-ktls-remove-redundant-posts-in-tx-resync-flow.patch
 net-mlx5e-ktls-do-not-send-decrypted-marked-skbs-via-non-accel-path.patch
 ipv4-detect-rollover-in-specific-fib-table-dump.patch
+revert-io_uring-only-allow-submit-from-owning-task.patch
+afs-fix-characters-allowed-into-cell-names.patch
+hwmon-adt7475-make-volt2reg-return-same-reg-as-reg2volt-input.patch
+hwmon-core-do-not-use-device-managed-functions-for-memory-allocations.patch
+ceph-hold-extra-reference-to-r_parent-over-life-of-request.patch
+pci-mark-amd-navi14-gpu-rev-0xc5-ats-as-broken.patch
+drm-panfrost-add-the-panfrost_gem_mapping-concept.patch
+drm-i915-align-engine-uabi_class-instance-with-i915_drm.h.patch
+pm-hibernate-fix-crashes-with-init_on_free-1.patch
+tracing-trigger-replace-unneeded-rcu-list-traversals.patch
+tracing-uprobe-fix-double-perf_event-linking-on-multiprobe-uprobe.patch
+tracing-do-not-set-trace-clock-if-tracefs-lockdown-is-in-effect.patch
+tracing-fix-histogram-code-when-expression-has-same-var-as-value.patch
+powerpc-mm-hash-fix-sharing-context-ids-between-kernel-userspace.patch
+powerpc-xive-discard-esb-load-value-when-interrupt-is-invalid.patch
+revert-iwlwifi-mvm-fix-scan-config-command-size.patch
+iwlwifi-mvm-don-t-send-the-iwl_mvm_rxq_nssn_sync-notif-to-rx-queues.patch
+xarray-fix-infinite-loop-with-entry-at-ulong_max.patch
+xarray-fix-xa_find_after-with-multi-index-entries.patch
+xarray-fix-xas_find-returning-too-many-entries.patch
+pinctrl-sunrisepoint-add-missing-interrupt-status-register-offset.patch
+iommu-vt-d-call-__dmar_remove_one_dev_info-with-valid-pointer.patch
+input-keyspan-remote-fix-control-message-timeouts.patch
+revert-input-synaptics-rmi4-don-t-increment-rmiaddr-for-smbus-transfers.patch
+arm-8950-1-ftrace-recordmcount-filter-relocation-types.patch
+mmc-tegra-fix-sdr50-tuning-override.patch
+mmc-sdhci-fix-minimum-clock-rate-for-v3-controller.patch
+mmc-sdhci_am654-remove-inverted-write-protect-flag.patch
+mmc-sdhci_am654-reset-command-and-data-line-after-tuning.patch
--- /dev/null
+From bf24daac8f2bd5b8affaec03c2be1d20bcdd6837 Mon Sep 17 00:00:00 2001
+From: Masami Ichikawa <masami256@gmail.com>
+Date: Thu, 16 Jan 2020 22:12:36 +0900
+Subject: tracing: Do not set trace clock if tracefs lockdown is in effect
+
+From: Masami Ichikawa <masami256@gmail.com>
+
+commit bf24daac8f2bd5b8affaec03c2be1d20bcdd6837 upstream.
+
+When the trace_clock option is not set and an unstable clock is
+detected, tracing_set_default_clock() sets the trace clock (a ThinkPad
+A285 is one such case). In that case, if lockdown is in effect, a null
+pointer dereference happens in ring_buffer_set_clock().
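+
+A sketch of how to provoke this (requires a machine whose sched_clock
+is unstable):
+
+ # boot with lockdown=integrity on the kernel command line;
+ # tracing_set_default_clock() then runs as a late_initcall and,
+ # without this patch, dereferences a NULL pointer in
+ # ring_buffer_set_clock(). With the patch, the kernel logs:
+ Can not set tracing clock due to lockdown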
+
+Link: http://lkml.kernel.org/r/20200116131236.3866925-1-masami256@gmail.com
+
+Cc: stable@vger.kernel.org
+Fixes: 17911ff38aa58 ("tracing: Add locked_down checks to the open calls of files created for tracefs")
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1788488
+Signed-off-by: Masami Ichikawa <masami256@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9270,6 +9270,11 @@ __init static int tracing_set_default_cl
+ {
+ /* sched_clock_stable() is determined in late_initcall */
+ if (!trace_boot_clock && !sched_clock_stable()) {
++ if (security_locked_down(LOCKDOWN_TRACEFS)) {
++ pr_warn("Can not set tracing clock due to lockdown\n");
++ return -EPERM;
++ }
++
+ printk(KERN_WARNING
+ "Unstable clock detected, switching default tracing clock to \"global\"\n"
+ "If you want to keep using the local clock, then add:\n"
--- /dev/null
+From 8bcebc77e85f3d7536f96845a0fe94b1dddb6af0 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 20 Jan 2020 13:07:31 -0500
+Subject: tracing: Fix histogram code when expression has same var as value
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 8bcebc77e85f3d7536f96845a0fe94b1dddb6af0 upstream.
+
+While working on a tool to convert SQL syntax into the histogram language of
+the kernel, I discovered the following bug:
+
+ # echo 'first u64 start_time u64 end_time pid_t pid u64 delta' >> synthetic_events
+ # echo 'hist:keys=pid:start=common_timestamp' > events/sched/sched_waking/trigger
+ # echo 'hist:keys=next_pid:delta=common_timestamp-$start,start2=$start:onmatch(sched.sched_waking).trace(first,$start2,common_timestamp,next_pid,$delta)' > events/sched/sched_switch/trigger
+
+This would not display any histograms on the sched_switch side.
+
+But if I were to swap the location of
+
+ "delta=common_timestamp-$start" with "start2=$start"
+
+Such that the last line had:
+
+ # echo 'hist:keys=next_pid:start2=$start,delta=common_timestamp-$start:onmatch(sched.sched_waking).trace(first,$start2,common_timestamp,next_pid,$delta)' > events/sched/sched_switch/trigger
+
+The histogram works as expected.
+
+What I found out is that the expressions clear out the value once it is
+resolved. As the variables are resolved in the order listed, when
+processing:
+
+ delta=common_timestamp-$start
+
+The $start is cleared. When it gets to "start2=$start", it errors out with
+"unresolved symbol" (which is silent as this happens at the location of the
+trace), and the histogram is dropped.
+
+When processing the histogram for variable references, instead of adding a
+new reference for a variable used twice, use the same reference. That way,
+not only is it more efficient, but the order will no longer matter in
+processing of the variables.
+
+From Tom Zanussi:
+
+ "Just to clarify some more about what the problem was is that without
+ your patch, we would have two separate references to the same variable,
+ and during resolve_var_refs(), they'd both want to be resolved
+ separately, so in this case, since the first reference to start wasn't
+ part of an expression, it wouldn't get the read-once flag set, so would
+ be read normally, and then the second reference would do the read-once
+ read and also be read but using read-once. So everything worked and
+ you didn't see a problem:
+
+ from: start2=$start,delta=common_timestamp-$start
+
+ In the second case, when you switched them around, the first reference
+ would be resolved by doing the read-once, and following that the second
+ reference would try to resolve and see that the variable had already
+ been read, so failed as unset, which caused it to short-circuit out and
+ not do the trigger action to generate the synthetic event:
+
+ to: delta=common_timestamp-$start,start2=$start
+
+ With your patch, we only have the single resolution which happens
+ correctly the one time it's resolved, so this can't happen."
+
+Link: https://lore.kernel.org/r/20200116154216.58ca08eb@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 067fe038e70f6 ("tracing: Add variable reference handling to hist triggers")
+Reviewed-by: Tom Zanussi <zanussi@kernel.org>
+Tested-by: Tom Zanussi <zanussi@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -116,6 +116,7 @@ struct hist_field {
+ struct ftrace_event_field *field;
+ unsigned long flags;
+ hist_field_fn_t fn;
++ unsigned int ref;
+ unsigned int size;
+ unsigned int offset;
+ unsigned int is_signed;
+@@ -2427,8 +2428,16 @@ static int contains_operator(char *str)
+ return field_op;
+ }
+
++static void get_hist_field(struct hist_field *hist_field)
++{
++ hist_field->ref++;
++}
++
+ static void __destroy_hist_field(struct hist_field *hist_field)
+ {
++ if (--hist_field->ref > 1)
++ return;
++
+ kfree(hist_field->var.name);
+ kfree(hist_field->name);
+ kfree(hist_field->type);
+@@ -2470,6 +2479,8 @@ static struct hist_field *create_hist_fi
+ if (!hist_field)
+ return NULL;
+
++ hist_field->ref = 1;
++
+ hist_field->hist_data = hist_data;
+
+ if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
+@@ -2665,6 +2676,17 @@ static struct hist_field *create_var_ref
+ {
+ unsigned long flags = HIST_FIELD_FL_VAR_REF;
+ struct hist_field *ref_field;
++ int i;
++
++ /* Check if the variable already exists */
++ for (i = 0; i < hist_data->n_var_refs; i++) {
++ ref_field = hist_data->var_refs[i];
++ if (ref_field->var.idx == var_field->var.idx &&
++ ref_field->var.hist_data == var_field->hist_data) {
++ get_hist_field(ref_field);
++ return ref_field;
++ }
++ }
+
+ ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
+ if (ref_field) {
--- /dev/null
+From aeed8aa3874dc15b9d82a6fe796fd7cfbb684448 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 20 Dec 2019 11:31:43 +0900
+Subject: tracing: trigger: Replace unneeded RCU-list traversals
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit aeed8aa3874dc15b9d82a6fe796fd7cfbb684448 upstream.
+
+With CONFIG_PROVE_RCU_LIST, I had many suspicious RCU warnings
+when I ran ftracetest trigger testcases.
+
+-----
+ # dmesg -c > /dev/null
+ # ./ftracetest test.d/trigger
+ ...
+ # dmesg | grep "RCU-list traversed" | cut -f 2 -d ] | cut -f 2 -d " "
+ kernel/trace/trace_events_hist.c:6070
+ kernel/trace/trace_events_hist.c:1760
+ kernel/trace/trace_events_hist.c:5911
+ kernel/trace/trace_events_trigger.c:504
+ kernel/trace/trace_events_hist.c:1810
+ kernel/trace/trace_events_hist.c:3158
+ kernel/trace/trace_events_hist.c:3105
+ kernel/trace/trace_events_hist.c:5518
+ kernel/trace/trace_events_hist.c:5998
+ kernel/trace/trace_events_hist.c:6019
+ kernel/trace/trace_events_hist.c:6044
+ kernel/trace/trace_events_trigger.c:1500
+ kernel/trace/trace_events_trigger.c:1540
+ kernel/trace/trace_events_trigger.c:539
+ kernel/trace/trace_events_trigger.c:584
+-----
+
+I investigated those warnings and found that the RCU-list
+traversals in event trigger and hist didn't need to use the
+RCU version because they were called only under event_mutex.
+
+I also checked other RCU-list traversals related to event
+trigger list, and found that most of them were called from
+event_hist_trigger_func() or hist_unregister_trigger() or
+register/unregister functions except for a few cases.
+
+Replace these unneeded RCU-list traversals with the normal list
+traversal macro, and add lockdep_assert_held() to check that
+event_mutex is held.
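+
+Both replacement patterns appear in the hunks below; in sketch form:
+
+ /* plain traversal, safe because event_mutex guards all writers: */
+ lockdep_assert_held(&event_mutex);
+ list_for_each_entry(test, &file->triggers, list)
+         ...
+
+ /* where a traversal stays RCU-marked, pass the lockdep condition
+  * so CONFIG_PROVE_RCU_LIST can verify the protection: */
+ list_for_each_entry_rcu(test, &file->triggers, list,
+                         lockdep_is_held(&event_mutex))
+         ...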
+
+Link: http://lkml.kernel.org/r/157680910305.11685.15110237954275915782.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 30350d65ac567 ("tracing: Add variable support to hist triggers")
+Reviewed-by: Tom Zanussi <zanussi@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 41 ++++++++++++++++++++++++++----------
+ kernel/trace/trace_events_trigger.c | 20 +++++++++++++----
+ 2 files changed, 45 insertions(+), 16 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1766,11 +1766,13 @@ static struct hist_field *find_var(struc
+ struct event_trigger_data *test;
+ struct hist_field *hist_field;
+
++ lockdep_assert_held(&event_mutex);
++
+ hist_field = find_var_field(hist_data, var_name);
+ if (hist_field)
+ return hist_field;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ test_data = test->private_data;
+ hist_field = find_var_field(test_data, var_name);
+@@ -1820,7 +1822,9 @@ static struct hist_field *find_file_var(
+ struct event_trigger_data *test;
+ struct hist_field *hist_field;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ test_data = test->private_data;
+ hist_field = find_var_field(test_data, var_name);
+@@ -3115,7 +3119,9 @@ static char *find_trigger_filter(struct
+ {
+ struct event_trigger_data *test;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (test->private_data == hist_data)
+ return test->filter_str;
+@@ -3166,9 +3172,11 @@ find_compatible_hist(struct hist_trigger
+ struct event_trigger_data *test;
+ unsigned int n_keys;
+
++ lockdep_assert_held(&event_mutex);
++
+ n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ hist_data = test->private_data;
+
+@@ -5528,7 +5536,7 @@ static int hist_show(struct seq_file *m,
+ goto out_unlock;
+ }
+
+- list_for_each_entry_rcu(data, &event_file->triggers, list) {
++ list_for_each_entry(data, &event_file->triggers, list) {
+ if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+ hist_trigger_show(m, data, n++);
+ }
+@@ -5921,7 +5929,9 @@ static int hist_register_trigger(char *g
+ if (hist_data->attrs->name && !named_data)
+ goto new;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -6005,10 +6015,12 @@ static bool have_hist_trigger_match(stru
+ struct event_trigger_data *test, *named_data = NULL;
+ bool match = false;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (hist_trigger_match(data, test, named_data, false)) {
+ match = true;
+@@ -6026,10 +6038,12 @@ static bool hist_trigger_check_refs(stru
+ struct hist_trigger_data *hist_data = data->private_data;
+ struct event_trigger_data *test, *named_data = NULL;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -6051,10 +6065,12 @@ static void hist_unregister_trigger(char
+ struct event_trigger_data *test, *named_data = NULL;
+ bool unregistered = false;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -6080,7 +6096,9 @@ static bool hist_file_check_refs(struct
+ struct hist_trigger_data *hist_data;
+ struct event_trigger_data *test;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ hist_data = test->private_data;
+ if (check_var_refs(hist_data))
+@@ -6323,7 +6341,8 @@ hist_enable_trigger(struct event_trigger
+ struct enable_trigger_data *enable_data = data->private_data;
+ struct event_trigger_data *test;
+
+- list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
++ list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
++ lockdep_is_held(&event_mutex)) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (enable_data->enable)
+ test->paused = false;
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event
+ struct event_trigger_data *data;
+ bool set_cond = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ if (data->filter || event_command_post_trigger(data->cmd_ops) ||
+ event_command_needs_rec(data->cmd_ops)) {
+ set_cond = true;
+@@ -536,7 +538,9 @@ static int register_trigger(char *glob,
+ struct event_trigger_data *test;
+ int ret = 0;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
+ ret = -EEXIST;
+ goto out;
+@@ -581,7 +585,9 @@ static void unregister_trigger(char *glo
+ struct event_trigger_data *data;
+ bool unregistered = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
+ unregistered = true;
+ list_del_rcu(&data->list);
+@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *
+ struct event_trigger_data *test;
+ int ret = 0;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ test_enable_data = test->private_data;
+ if (test_enable_data &&
+ (test->cmd_ops->trigger_type ==
+@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(cha
+ struct event_trigger_data *data;
+ bool unregistered = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ enable_data = data->private_data;
+ if (enable_data &&
+ (data->cmd_ops->trigger_type ==
--- /dev/null
+From 99c9a923e97a583a38050baa92c9377d73946330 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 10 Jan 2020 10:45:39 +0900
+Subject: tracing/uprobe: Fix double perf_event linking on multiprobe uprobe
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 99c9a923e97a583a38050baa92c9377d73946330 upstream.
+
+Fix double perf_event linking to trace_uprobe_filter on a
+multi-probe uprobe event by moving trace_uprobe_filter under
+trace_probe_event.
+
+In a uprobe perf event, the trace_uprobe_filter data structure
+manages the target mm filters (in perf_event) related to each
+uprobe event.
+
+Since commit 60d53e2c3b75 ("tracing/probe: Split trace_event
+related data from trace_probe") left the trace_uprobe_filter
+data structure in trace_uprobe, if a trace_probe_event has
+multiple trace_uprobes (a multi-probe event), a perf_event is
+added to a different trace_uprobe_filter on each trace_uprobe.
+This leads to linked list corruption.
+
+To fix this issue, move trace_uprobe_filter to trace_probe_event
+and link it once on each event instead of each probe.
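+
+In effect, the filter now lives in a trailing data area of the shared
+event. A condensed sketch of the resulting allocation (mirroring
+trace_probe_init() and trace_uprobe_get_filter() in the hunks below):
+
+ struct trace_probe_event *tpe;
+
+ /* one allocation per event; the filter follows the struct: */
+ tpe = kzalloc(sizeof(*tpe) + sizeof(struct trace_uprobe_filter),
+               GFP_KERNEL);
+ /* every trace_uprobe attached to the event shares this filter: */
+ filter = (struct trace_uprobe_filter *)&tpe->data[0];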
+
+Link: http://lkml.kernel.org/r/157862073931.1800.3800576241181489174.stgit@devnote2
+
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: "David S . Miller" <davem@davemloft.net>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Toke Høiland-Jørgensen <thoiland@redhat.com>
+Cc: Jean-Tsung Hsiao <jhsiao@redhat.com>
+Cc: Jesper Dangaard Brouer <brouer@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 60d53e2c3b75 ("tracing/probe: Split trace_event related data from trace_probe")
+Link: https://lkml.kernel.org/r/20200108171611.GA8472@kernel.org
+Reported-by: Arnaldo Carvalho de Melo <acme@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_kprobe.c | 2
+ kernel/trace/trace_probe.c | 5 +
+ kernel/trace/trace_probe.h | 3 -
+ kernel/trace/trace_uprobe.c | 124 ++++++++++++++++++++++++++++----------------
+ 4 files changed, 86 insertions(+), 48 deletions(-)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_
+ INIT_HLIST_NODE(&tk->rp.kp.hlist);
+ INIT_LIST_HEAD(&tk->rp.kp.list);
+
+- ret = trace_probe_init(&tk->tp, event, group);
++ ret = trace_probe_init(&tk->tp, event, group, 0);
+ if (ret < 0)
+ goto error;
+
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -984,7 +984,7 @@ void trace_probe_cleanup(struct trace_pr
+ }
+
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+- const char *group)
++ const char *group, size_t event_data_size)
+ {
+ struct trace_event_call *call;
+ int ret = 0;
+@@ -992,7 +992,8 @@ int trace_probe_init(struct trace_probe
+ if (!event || !group)
+ return -EINVAL;
+
+- tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
++ tp->event = kzalloc(sizeof(struct trace_probe_event) + event_data_size,
++ GFP_KERNEL);
+ if (!tp->event)
+ return -ENOMEM;
+
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -230,6 +230,7 @@ struct trace_probe_event {
+ struct trace_event_call call;
+ struct list_head files;
+ struct list_head probes;
++ char data[0];
+ };
+
+ struct trace_probe {
+@@ -322,7 +323,7 @@ static inline bool trace_probe_has_singl
+ }
+
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+- const char *group);
++ const char *group, size_t event_data_size);
+ void trace_probe_cleanup(struct trace_probe *tp);
+ int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
+ void trace_probe_unlink(struct trace_probe *tp);
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -60,7 +60,6 @@ static struct dyn_event_operations trace
+ */
+ struct trace_uprobe {
+ struct dyn_event devent;
+- struct trace_uprobe_filter filter;
+ struct uprobe_consumer consumer;
+ struct path path;
+ struct inode *inode;
+@@ -264,6 +263,14 @@ process_fetch_insn(struct fetch_insn *co
+ }
+ NOKPROBE_SYMBOL(process_fetch_insn)
+
++static struct trace_uprobe_filter *
++trace_uprobe_get_filter(struct trace_uprobe *tu)
++{
++ struct trace_probe_event *event = tu->tp.event;
++
++ return (struct trace_uprobe_filter *)&event->data[0];
++}
++
+ static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
+ {
+ rwlock_init(&filter->rwlock);
+@@ -351,7 +358,8 @@ alloc_trace_uprobe(const char *group, co
+ if (!tu)
+ return ERR_PTR(-ENOMEM);
+
+- ret = trace_probe_init(&tu->tp, event, group);
++ ret = trace_probe_init(&tu->tp, event, group,
++ sizeof(struct trace_uprobe_filter));
+ if (ret < 0)
+ goto error;
+
+@@ -359,7 +367,7 @@ alloc_trace_uprobe(const char *group, co
+ tu->consumer.handler = uprobe_dispatcher;
+ if (is_ret)
+ tu->consumer.ret_handler = uretprobe_dispatcher;
+- init_trace_uprobe_filter(&tu->filter);
++ init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
+ return tu;
+
+ error:
+@@ -1067,13 +1075,14 @@ static void __probe_event_disable(struct
+ struct trace_probe *pos;
+ struct trace_uprobe *tu;
+
++ tu = container_of(tp, struct trace_uprobe, tp);
++ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
++
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
+ if (!tu->inode)
+ continue;
+
+- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+-
+ uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
+ tu->inode = NULL;
+ }
+@@ -1108,7 +1117,7 @@ static int probe_event_enable(struct tra
+ }
+
+ tu = container_of(tp, struct trace_uprobe, tp);
+- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
++ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
+
+ if (enabled)
+ return 0;
+@@ -1205,39 +1214,39 @@ __uprobe_perf_filter(struct trace_uprobe
+ }
+
+ static inline bool
+-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
++trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
++ struct perf_event *event)
+ {
+- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
++ return __uprobe_perf_filter(filter, event->hw.target->mm);
+ }
+
+-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
++static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
++ struct perf_event *event)
+ {
+ bool done;
+
+- write_lock(&tu->filter.rwlock);
++ write_lock(&filter->rwlock);
+ if (event->hw.target) {
+ list_del(&event->hw.tp_list);
+- done = tu->filter.nr_systemwide ||
++ done = filter->nr_systemwide ||
+ (event->hw.target->flags & PF_EXITING) ||
+- uprobe_filter_event(tu, event);
++ trace_uprobe_filter_event(filter, event);
+ } else {
+- tu->filter.nr_systemwide--;
+- done = tu->filter.nr_systemwide;
++ filter->nr_systemwide--;
++ done = filter->nr_systemwide;
+ }
+- write_unlock(&tu->filter.rwlock);
+-
+- if (!done)
+- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
++ write_unlock(&filter->rwlock);
+
+- return 0;
++ return done;
+ }
+
+-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
++/* This returns true if the filter always covers target mm */
++static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
++ struct perf_event *event)
+ {
+ bool done;
+- int err;
+
+- write_lock(&tu->filter.rwlock);
++ write_lock(&filter->rwlock);
+ if (event->hw.target) {
+ /*
+ * event->parent != NULL means copy_process(), we can avoid
+@@ -1247,28 +1256,21 @@ static int uprobe_perf_open(struct trace
+ * attr.enable_on_exec means that exec/mmap will install the
+ * breakpoints we need.
+ */
+- done = tu->filter.nr_systemwide ||
++ done = filter->nr_systemwide ||
+ event->parent || event->attr.enable_on_exec ||
+- uprobe_filter_event(tu, event);
+- list_add(&event->hw.tp_list, &tu->filter.perf_events);
++ trace_uprobe_filter_event(filter, event);
++ list_add(&event->hw.tp_list, &filter->perf_events);
+ } else {
+- done = tu->filter.nr_systemwide;
+- tu->filter.nr_systemwide++;
++ done = filter->nr_systemwide;
++ filter->nr_systemwide++;
+ }
+- write_unlock(&tu->filter.rwlock);
++ write_unlock(&filter->rwlock);
+
+- err = 0;
+- if (!done) {
+- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+- if (err)
+- uprobe_perf_close(tu, event);
+- }
+- return err;
++ return done;
+ }
+
+-static int uprobe_perf_multi_call(struct trace_event_call *call,
+- struct perf_event *event,
+- int (*op)(struct trace_uprobe *tu, struct perf_event *event))
++static int uprobe_perf_close(struct trace_event_call *call,
++ struct perf_event *event)
+ {
+ struct trace_probe *pos, *tp;
+ struct trace_uprobe *tu;
+@@ -1278,25 +1280,59 @@ static int uprobe_perf_multi_call(struct
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+
++ tu = container_of(tp, struct trace_uprobe, tp);
++ if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
++ return 0;
++
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
+- ret = op(tu, event);
++ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+ if (ret)
+ break;
+ }
+
+ return ret;
+ }
++
++static int uprobe_perf_open(struct trace_event_call *call,
++ struct perf_event *event)
++{
++ struct trace_probe *pos, *tp;
++ struct trace_uprobe *tu;
++ int err = 0;
++
++ tp = trace_probe_primary_from_call(call);
++ if (WARN_ON_ONCE(!tp))
++ return -ENODEV;
++
++ tu = container_of(tp, struct trace_uprobe, tp);
++ if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
++ return 0;
++
++ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
++ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
++ if (err) {
++ uprobe_perf_close(call, event);
++ break;
++ }
++ }
++
++ return err;
++}
++
+ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
+ enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+ {
++ struct trace_uprobe_filter *filter;
+ struct trace_uprobe *tu;
+ int ret;
+
+ tu = container_of(uc, struct trace_uprobe, consumer);
+- read_lock(&tu->filter.rwlock);
+- ret = __uprobe_perf_filter(&tu->filter, mm);
+- read_unlock(&tu->filter.rwlock);
++ filter = trace_uprobe_get_filter(tu);
++
++ read_lock(&filter->rwlock);
++ ret = __uprobe_perf_filter(filter, mm);
++ read_unlock(&filter->rwlock);
+
+ return ret;
+ }
+@@ -1419,10 +1455,10 @@ trace_uprobe_register(struct trace_event
+ return 0;
+
+ case TRACE_REG_PERF_OPEN:
+- return uprobe_perf_multi_call(event, data, uprobe_perf_open);
++ return uprobe_perf_open(event, data);
+
+ case TRACE_REG_PERF_CLOSE:
+- return uprobe_perf_multi_call(event, data, uprobe_perf_close);
++ return uprobe_perf_close(event, data);
+
+ #endif
+ default:
--- /dev/null
+From 430f24f94c8a174d411a550d7b5529301922e67a Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 17 Jan 2020 17:45:12 -0500
+Subject: XArray: Fix infinite loop with entry at ULONG_MAX
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 430f24f94c8a174d411a550d7b5529301922e67a upstream.
+
+If there is an entry at ULONG_MAX, xa_for_each() will overflow the
+'index + 1' in xa_find_after() and wrap around to 0. Catch this case
+and terminate the loop by returning NULL.
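+
+A condensed sketch of the failure mode (do_something() is a stand-in):
+
+ void *entry;
+ unsigned long index;
+
+ /* with an entry stored at ULONG_MAX, this loop never terminates:
+  * each pass calls xa_find_after(), whose 'index + 1' wraps to 0
+  * and restarts the search from the beginning */
+ xa_for_each(&xa, index, entry)
+         do_something(entry);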
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_xarray.c | 17 +++++++++++++++++
+ lib/xarray.c | 3 +++
+ 2 files changed, 20 insertions(+)
+
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1046,11 +1046,28 @@ static noinline void check_find_3(struct
+ xa_destroy(xa);
+ }
+
++static noinline void check_find_4(struct xarray *xa)
++{
++ unsigned long index = 0;
++ void *entry;
++
++ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
++
++ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
++ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
++
++ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
++ XA_BUG_ON(xa, entry);
++
++ xa_erase_index(xa, ULONG_MAX);
++}
++
+ static noinline void check_find(struct xarray *xa)
+ {
+ check_find_1(xa);
+ check_find_2(xa);
+ check_find_3(xa);
++ check_find_4(xa);
+ check_multi_find(xa);
+ check_multi_find_2(xa);
+ }
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1847,6 +1847,9 @@ void *xa_find_after(struct xarray *xa, u
+ XA_STATE(xas, xa, *indexp + 1);
+ void *entry;
+
++ if (xas.xa_index == 0)
++ return NULL;
++
+ rcu_read_lock();
+ for (;;) {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
--- /dev/null
+From 19c30f4dd0923ef191f35c652ee4058e91e89056 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 17 Jan 2020 22:00:41 -0500
+Subject: XArray: Fix xa_find_after with multi-index entries
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 19c30f4dd0923ef191f35c652ee4058e91e89056 upstream.
+
+If the entry is of an order which is a multiple of XA_CHUNK_SHIFT,
+the current detection of sibling entries does not work. Factor out
+an xas_sibling() function to make xa_find_after() a little more
+understandable, and write a new implementation that doesn't suffer from
+the same bug.
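+
+A worked instance, matching the order = 6 iteration of the updated
+test below (XA_CHUNK_SHIFT is 6 in the common configuration):
+
+ multi = 3 << 6 = 192   /* entry occupies indices 192-255 */
+ next  = 4 << 6 = 256
+
+ index = 193;
+ xa_find(xa, &index, ULONG_MAX, XA_PRESENT)       /* entry at 192 */
+ xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) /* must return the
+                                                     entry at 256,
+                                                     not 192 again */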
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_xarray.c | 32 +++++++++++++++++++-------------
+ lib/xarray.c | 20 +++++++++++++-------
+ 2 files changed, 32 insertions(+), 20 deletions(-)
+
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -902,28 +902,30 @@ static noinline void check_store_iter(st
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
+-static noinline void check_multi_find(struct xarray *xa)
++static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
+ {
+ #ifdef CONFIG_XARRAY_MULTI
++ unsigned long multi = 3 << order;
++ unsigned long next = 4 << order;
+ unsigned long index;
+
+- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
+- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
++ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
++ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+
+ index = 0;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+- xa_mk_value(12));
+- XA_BUG_ON(xa, index != 12);
+- index = 13;
++ xa_mk_value(multi));
++ XA_BUG_ON(xa, index != multi);
++ index = multi + 1;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+- xa_mk_value(12));
+- XA_BUG_ON(xa, (index < 12) || (index >= 16));
++ xa_mk_value(multi));
++ XA_BUG_ON(xa, (index < multi) || (index >= next));
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+- xa_mk_value(16));
+- XA_BUG_ON(xa, index != 16);
++ xa_mk_value(next));
++ XA_BUG_ON(xa, index != next);
+
+- xa_erase_index(xa, 12);
+- xa_erase_index(xa, 16);
++ xa_erase_index(xa, multi);
++ xa_erase_index(xa, next);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ #endif
+ }
+@@ -1064,11 +1066,15 @@ static noinline void check_find_4(struct
+
+ static noinline void check_find(struct xarray *xa)
+ {
++ unsigned i;
++
+ check_find_1(xa);
+ check_find_2(xa);
+ check_find_3(xa);
+ check_find_4(xa);
+- check_multi_find(xa);
++
++ for (i = 2; i < 10; i++)
++ check_multi_find_1(xa, i);
+ check_multi_find_2(xa);
+ }
+
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1824,6 +1824,17 @@ void *xa_find(struct xarray *xa, unsigne
+ }
+ EXPORT_SYMBOL(xa_find);
+
++static bool xas_sibling(struct xa_state *xas)
++{
++ struct xa_node *node = xas->xa_node;
++ unsigned long mask;
++
++ if (!node)
++ return false;
++ mask = (XA_CHUNK_SIZE << node->shift) - 1;
++ return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
++}
++
+ /**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+@@ -1858,13 +1869,8 @@ void *xa_find_after(struct xarray *xa, u
+ entry = xas_find(&xas, max);
+ if (xas.xa_node == XAS_BOUNDS)
+ break;
+- if (xas.xa_shift) {
+- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
+- continue;
+- } else {
+- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
+- continue;
+- }
++ if (xas_sibling(&xas))
++ continue;
+ if (!xas_retry(&xas, entry))
+ break;
+ }
--- /dev/null
+From c44aa5e8ab58b5f4cf473970ec784c3333496a2e Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 17 Jan 2020 22:13:21 -0500
+Subject: XArray: Fix xas_find returning too many entries
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit c44aa5e8ab58b5f4cf473970ec784c3333496a2e upstream.
+
+If you call xas_find() with the initial index > max, it should
+return NULL, but it was returning the entry at index.
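+
+A condensed sketch of the case being fixed (assumes an entry stored
+at index 10):
+
+ XA_STATE(xas, &xa, 10);     /* search starts at index 10 */
+
+ /* max (5) is below the start index, so this must return NULL,
+  * not the entry at index 10 */
+ entry = xas_find(&xas, 5);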
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_xarray.c | 5 +++++
+ lib/xarray.c | 10 ++++++++--
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -2,6 +2,7 @@
+ /*
+ * test_xarray.c: Test the XArray API
+ * Copyright (c) 2017-2018 Microsoft Corporation
++ * Copyright (c) 2019-2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+@@ -911,6 +912,7 @@ static noinline void check_multi_find_1(
+
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
++ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
+
+ index = 0;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+@@ -923,9 +925,12 @@ static noinline void check_multi_find_1(
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
++ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
++ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
++ xa_erase_index(xa, next + 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ #endif
+ }
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ /*
+ * XArray implementation
+- * Copyright (c) 2017 Microsoft Corporation
++ * Copyright (c) 2017-2018 Microsoft Corporation
++ * Copyright (c) 2018-2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+@@ -1081,6 +1082,8 @@ void *xas_find(struct xa_state *xas, uns
+
+ if (xas_error(xas))
+ return NULL;
++ if (xas->xa_index > max)
++ return set_bounds(xas);
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+@@ -1150,6 +1153,8 @@ void *xas_find_marked(struct xa_state *x
+
+ if (xas_error(xas))
+ return NULL;
++ if (xas->xa_index > max)
++ goto max;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+@@ -1867,7 +1872,8 @@ void *xa_find_after(struct xarray *xa, u
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+- if (xas.xa_node == XAS_BOUNDS)
++
++ if (xas_invalid(&xas))
+ break;
+ if (xas_sibling(&xas))
+ continue;