--- /dev/null
+From d6e47819721ae2d9d090058ad5570a66f3c42e39 Mon Sep 17 00:00:00 2001
+From: "Yan, Zheng" <zyan@redhat.com>
+Date: Thu, 23 May 2019 11:01:37 +0800
+Subject: ceph: hold i_ceph_lock when removing caps for freeing inode
+
+From: Yan, Zheng <zyan@redhat.com>
+
+commit d6e47819721ae2d9d090058ad5570a66f3c42e39 upstream.
+
+ceph_d_revalidate(..., LOOKUP_RCU) may call __ceph_caps_issued_mask()
+on an inode that is being freed, so ceph_queue_caps_release() must hold
+i_ceph_lock while it removes the caps.
+
+Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ceph/caps.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1072,20 +1072,23 @@ static int send_cap_msg(struct ceph_mds_
+ }
+
+ /*
+- * Queue cap releases when an inode is dropped from our cache. Since
+- * inode is about to be destroyed, there is no need for i_ceph_lock.
++ * Queue cap releases when an inode is dropped from our cache.
+ */
+ void ceph_queue_caps_release(struct inode *inode)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+
++ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
++ * may call __ceph_caps_issued_mask() on a freeing inode. */
++ spin_lock(&ci->i_ceph_lock);
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+ p = rb_next(p);
+ __ceph_remove_cap(cap, true);
+ }
++ spin_unlock(&ci->i_ceph_lock);
+ }
+
+ /*
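
To see why the lock is needed even though the inode is on its way out of
the cache: a LOOKUP_RCU d_revalidate can still reach the inode and walk
the same caps tree that the teardown path is freeing. Below is a minimal
userspace sketch of the locking pattern the patch restores (plain C with
pthreads; every name here is illustrative, none of this is kernel code):

#include <pthread.h>
#include <stdlib.h>

struct cap {
    struct cap *next;
    int issued;
};

struct inode_info {
    pthread_spinlock_t lock;    /* stands in for i_ceph_lock */
    struct cap *caps;           /* stands in for ci->i_caps */
};

/* Reader, like __ceph_caps_issued_mask() reached from a LOOKUP_RCU
 * d_revalidate: it may run even while the inode is being torn down. */
static int caps_issued_mask(struct inode_info *ci, int mask)
{
    int have = 0;
    struct cap *c;

    pthread_spin_lock(&ci->lock);
    for (c = ci->caps; c; c = c->next)
        have |= c->issued;
    pthread_spin_unlock(&ci->lock);
    return (have & mask) == mask;
}

/* Teardown, like ceph_queue_caps_release() after the fix: the walk and
 * the frees happen under the same lock, so the reader above can never
 * observe a cap that has already been freed. */
static void queue_caps_release(struct inode_info *ci)
{
    struct cap *c, *next;

    pthread_spin_lock(&ci->lock);
    c = ci->caps;
    ci->caps = NULL;
    for (; c; c = next) {
        next = c->next;
        free(c);
    }
    pthread_spin_unlock(&ci->lock);
}

int main(void)
{
    struct inode_info ci = { .caps = NULL };

    pthread_spin_init(&ci.lock, PTHREAD_PROCESS_PRIVATE);
    queue_caps_release(&ci);            /* safe even with no caps */
    return caps_issued_mask(&ci, 1);    /* 0: nothing issued */
}

Before the fix, the teardown side skipped the lock on the assumption
that a dying inode had no other users; LOOKUP_RCU revalidation breaks
exactly that assumption.
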
--- /dev/null
+From 5515e9a6273b8c02034466bcbd717ac9f53dab99 Mon Sep 17 00:00:00 2001
+From: Miroslav Lichvar <mlichvar@redhat.com>
+Date: Tue, 16 Jul 2019 16:30:09 -0700
+Subject: drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
+
+From: Miroslav Lichvar <mlichvar@redhat.com>
+
+commit 5515e9a6273b8c02034466bcbd717ac9f53dab99 upstream.
+
+The PPS assert/clear offset corrections are set by the PPS_SETPARAMS
+ioctl in the pps_ktime structs, which also contain flags. The flags are
+not initialized by applications (using the timepps.h header) and they
+are not used by the kernel for anything except returning them back in
+the PPS_GETPARAMS ioctl.
+
+Set the flags to zero to make it clear they are unused, and to avoid
+leaking uninitialized data from the PPS_SETPARAMS caller to other
+applications that have read access to the PPS device.
+
+Link: http://lkml.kernel.org/r/20190702092251.24303-1-mlichvar@redhat.com
+Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Rodolfo Giometti <giometti@enneenne.com>
+Cc: Greg KH <greg@kroah.com>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pps/pps.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -129,6 +129,14 @@ static long pps_cdev_ioctl(struct file *
+ pps->params.mode |= PPS_CANWAIT;
+ pps->params.api_version = PPS_API_VERS;
+
++ /*
++ * Clear unused fields of pps_kparams to avoid leaking
++ * uninitialized data of the PPS_SETPARAMS caller via
++ * PPS_GETPARAMS
++ */
++ pps->params.assert_off_tu.flags = 0;
++ pps->params.clear_off_tu.flags = 0;
++
+ spin_unlock_irq(&pps->lock);
+
+ break;
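
The leak is easiest to see from the caller's side. Below is a hedged
userspace sketch of a typical PPS_SETPARAMS caller (struct pps_kparams
and the ioctls come from <linux/pps.h>; the helper name, the error
handling, and the /dev/pps0 path are assumptions for illustration, and
PPS_SETPARAMS typically requires CAP_SYS_TIME):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

/* Hypothetical helper: request a -200 ns assert offset on a PPS fd. */
static int set_assert_offset(int fd)
{
    struct pps_kparams params;          /* stack memory, not zeroed */

    params.api_version = PPS_API_VERS;
    params.mode = PPS_CAPTUREASSERT;
    params.assert_off_tu.sec = 0;
    params.assert_off_tu.nsec = -200;
    /*
     * params.assert_off_tu.flags and all of params.clear_off_tu still
     * hold whatever happened to be on the stack.  Before this fix the
     * kernel stored the flags verbatim, and any process with read
     * access to the device could fetch them back via PPS_GETPARAMS.
     */
    return ioctl(fd, PPS_SETPARAMS, &params);
}

int main(void)
{
    int fd = open("/dev/pps0", O_RDWR); /* conventional device path */

    if (fd < 0)
        return 1;
    return set_assert_offset(fd) < 0 ? 1 : 0;
}
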
--- /dev/null
+From 16d51a590a8ce3befb1308e0e7ab77f3b661af33 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Tue, 16 Jul 2019 17:20:45 +0200
+Subject: sched/fair: Don't free p->numa_faults with concurrent readers
+
+From: Jann Horn <jannh@google.com>
+
+commit 16d51a590a8ce3befb1308e0e7ab77f3b661af33 upstream.
+
+When going through execve(), zero out the NUMA fault statistics instead of
+freeing them.
+
+During execve, the task is reachable through procfs and the scheduler. A
+concurrent /proc/*/sched reader can read data from a freed ->numa_faults
+allocation (confirmed by KASAN) and write it back to userspace.
+I believe that it would also be possible for a use-after-free read to occur
+through a race between a NUMA fault and execve(): task_numa_fault() can
+lead to task_numa_compare(), which invokes task_weight() on the currently
+running task of a different CPU.
+
+Another way to fix this would be to make ->numa_faults RCU-managed or add
+extra locking, but it seems easier to wipe the NUMA fault statistics on
+execve.
+
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will@kernel.org>
+Fixes: 82727018b0d3 ("sched/numa: Call task_numa_free() from do_execve()")
+Link: https://lkml.kernel.org/r/20190716152047.14424-1-jannh@google.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/exec.c | 2 +-
+ include/linux/sched.h | 4 ++--
+ kernel/fork.c | 2 +-
+ kernel/sched/fair.c | 24 ++++++++++++++++++++----
+ 4 files changed, 24 insertions(+), 8 deletions(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1642,7 +1642,7 @@ static int do_execveat_common(int fd, st
+ current->fs->in_exec = 0;
+ current->in_execve = 0;
+ acct_update_integrals(current);
+- task_numa_free(current);
++ task_numa_free(current, false);
+ free_bprm(bprm);
+ kfree(pathbuf);
+ putname(filename);
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1860,7 +1860,7 @@ extern int arch_task_struct_size __read_
+ extern void task_numa_fault(int last_node, int node, int pages, int flags);
+ extern pid_t task_numa_group_id(struct task_struct *p);
+ extern void set_numabalancing_state(bool enabled);
+-extern void task_numa_free(struct task_struct *p);
++extern void task_numa_free(struct task_struct *p, bool final);
+ extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+ int src_nid, int dst_cpu);
+ #else
+@@ -1875,7 +1875,7 @@ static inline pid_t task_numa_group_id(s
+ static inline void set_numabalancing_state(bool enabled)
+ {
+ }
+-static inline void task_numa_free(struct task_struct *p)
++static inline void task_numa_free(struct task_struct *p, bool final)
+ {
+ }
+ static inline bool should_numa_migrate_memory(struct task_struct *p,
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -254,7 +254,7 @@ void __put_task_struct(struct task_struc
+ WARN_ON(tsk == current);
+
+ cgroup_free(tsk);
+- task_numa_free(tsk);
++ task_numa_free(tsk, true);
+ security_task_free(tsk);
+ exit_creds(tsk);
+ delayacct_tsk_free(tsk);
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2054,13 +2054,23 @@ no_join:
+ return;
+ }
+
+-void task_numa_free(struct task_struct *p)
++/*
++ * Get rid of NUMA statistics associated with a task (either current or dead).
++ * If @final is set, the task is dead and has reached refcount zero, so we can
++ * safely free all relevant data structures. Otherwise, there might be
++ * concurrent reads from places like load balancing and procfs, and we should
++ * reset the data back to default state without freeing ->numa_faults.
++ */
++void task_numa_free(struct task_struct *p, bool final)
+ {
+ struct numa_group *grp = p->numa_group;
+- void *numa_faults = p->numa_faults;
++ unsigned long *numa_faults = p->numa_faults;
+ unsigned long flags;
+ int i;
+
++ if (!numa_faults)
++ return;
++
+ if (grp) {
+ spin_lock_irqsave(&grp->lock, flags);
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+@@ -2073,8 +2083,14 @@ void task_numa_free(struct task_struct *
+ put_numa_group(grp);
+ }
+
+- p->numa_faults = NULL;
+- kfree(numa_faults);
++ if (final) {
++ p->numa_faults = NULL;
++ kfree(numa_faults);
++ } else {
++ p->total_numa_faults = 0;
++ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
++ numa_faults[i] = 0;
++ }
+ }
+
+ /*
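
The fix is an instance of a more general teardown pattern: a single
helper takes a 'final' flag, only the refcount-zero caller passes true
and really frees, and every other caller resets the data in place. A
minimal userspace sketch of that pattern (plain C; the struct and all
names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct task_stats {
    unsigned long *faults;    /* plays the role of p->numa_faults */
    size_t nr;                /* number of counters */
    unsigned long total;      /* plays the role of p->total_numa_faults */
};

/*
 * final == true:  the owner is unreachable (refcount hit zero), so the
 *                 allocation can really be freed.
 * final == false: concurrent readers may still exist (the execve path
 *                 above), so only zero the counters and keep the
 *                 allocation alive for them.
 */
static void task_stats_free(struct task_stats *s, bool final)
{
    unsigned long *faults = s->faults;

    if (!faults)
        return;

    if (final) {
        s->faults = NULL;
        free(faults);
    } else {
        s->total = 0;
        memset(faults, 0, s->nr * sizeof(*faults));
    }
}

int main(void)
{
    struct task_stats s = { .nr = 8 };

    s.faults = calloc(s.nr, sizeof(*s.faults));
    task_stats_free(&s, false);   /* execve-style: reset only */
    task_stats_free(&s, true);    /* last reference gone: free */
    return 0;
}

The kernel version zeroes the array element by element instead of using
memset(), and tolerates racing readers seeing a mix of old and zeroed
counters; stale statistics are harmless, reading freed memory is not.
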