--- /dev/null
+From 30da870ce4a4e007c901858a96e9e394a1daa74a Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 6 May 2018 12:15:20 -0400
+Subject: affs_lookup(): close a race with affs_remove_link()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 30da870ce4a4e007c901858a96e9e394a1daa74a upstream.
+
+We unlock the directory hash too early - if we are looking at a secondary
+link and the primary link (in another directory) gets removed just as we
+unlock, the old primary can be moved into the place of the secondary,
+leaving us looking into a freed entry (and leaving our dentry with
+->d_fsdata pointing to that freed entry).
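+
+As a minimal sketch of the corrected ordering (condensed from the hunks
+below; error paths and the hard-link resolution done by the real
+affs_lookup() are omitted), the directory hash stays locked across the
+lookup, the iget and the d_add:
+
+	affs_lock_dir(dir);
+	bh = affs_find_entry(dir, dentry);	/* entry pinned by the lock */
+	if (!IS_ERR(bh) && bh) {
+		u32 ino = bh->b_blocknr;
+
+		affs_brelse(bh);
+		inode = affs_iget(sb, ino);
+	}
+	d_add(dentry, inode);
+	affs_unlock_dir(dir);			/* only now may links move */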
+
+Cc: stable@vger.kernel.org # 2.4.4+
+Acked-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/affs/namei.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/fs/affs/namei.c
++++ b/fs/affs/namei.c
+@@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct de
+
+ affs_lock_dir(dir);
+ bh = affs_find_entry(dir, dentry);
+- affs_unlock_dir(dir);
+- if (IS_ERR(bh))
++ if (IS_ERR(bh)) {
++ affs_unlock_dir(dir);
+ return ERR_CAST(bh);
++ }
+ if (bh) {
+ u32 ino = bh->b_blocknr;
+
+@@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct de
+ }
+ affs_brelse(bh);
+ inode = affs_iget(sb, ino);
+- if (IS_ERR(inode))
++ if (IS_ERR(inode)) {
++ affs_unlock_dir(dir);
+ return ERR_CAST(inode);
++ }
+ }
+ d_add(dentry, inode);
++ affs_unlock_dir(dir);
+ return NULL;
+ }
+
--- /dev/null
+From baf10564fbb66ea222cae66fbff11c444590ffd9 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 20 May 2018 16:46:23 -0400
+Subject: aio: fix io_destroy(2) vs. lookup_ioctx() race
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit baf10564fbb66ea222cae66fbff11c444590ffd9 upstream.
+
+kill_ioctx() used to have an explicit RCU delay between removing the
+reference from ->ioctx_table and percpu_ref_kill() dropping the refcount.
+At some point that delay had been removed, on the theory that
+percpu_ref_kill() itself contained an RCU delay. Unfortunately, that was
+the wrong kind of RCU delay and it didn't care about rcu_read_lock() used
+by lookup_ioctx(). As a result, we could get ctx freed right under
+lookup_ioctx(). Tejun has fixed that in a6d7cff472e ("fs/aio: Add explicit
+RCU grace period when freeing kioctx"); however, that fix is not enough.
+
+Suppose io_destroy() from one thread races with e.g. io_setup() from another;
+CPU1 removes the reference from current->mm->ioctx_table[...] just as CPU2
+has picked it (under rcu_read_lock()). Then CPU1 proceeds to drop the
+refcount, getting it to 0 and triggering a call of free_ioctx_users(),
+which proceeds to drop the secondary refcount and once that reaches zero
+calls free_ioctx_reqs(). That does
+ INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
+ queue_rcu_work(system_wq, &ctx->free_rwork);
+and schedules freeing the whole thing after RCU delay.
+
+In the meantime CPU2 has gotten around to percpu_ref_get(), bumping the
+refcount from 0 to 1 and returned the reference to io_setup().
+
+Tejun's fix (that queue_rcu_work() in there) guarantees that ctx won't get
+freed until after percpu_ref_get(). Sure, we'd increment the counter before
+ctx can be freed. Now we are out of rcu_read_lock() and there's nothing to
+stop freeing of the whole thing. Unfortunately, CPU2 assumes that since it
+has grabbed the reference, ctx is *NOT* going away until it gets around to
+dropping that reference.
+
+The fix is obvious - use percpu_ref_tryget_live() and treat failure as a
+miss. It's no costlier than what we currently do in the normal case, it's
+safe to call since freeing *is* delayed, and it closes the race window:
+either lookup_ioctx() comes before percpu_ref_kill() (in which case
+ctx->users won't reach 0 until the caller of lookup_ioctx() drops it), or
+lookup_ioctx() fails, ctx->users is unaffected, and the caller of
+lookup_ioctx() doesn't see the object in question at all.
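+
+Condensed from the hunk below, the lookup now follows the usual RCU +
+tryget-live pattern (sketch only; error handling trimmed):
+
+	rcu_read_lock();
+	ctx = rcu_dereference(table->table[id]);
+	if (ctx && ctx->user_id == ctx_id) {
+		/* fails once percpu_ref_kill() has marked the ref dead */
+		if (percpu_ref_tryget_live(&ctx->users))
+			ret = ctx;
+	}
+	rcu_read_unlock();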
+
+Cc: stable@kernel.org
+Fixes: a6d7cff472e ("fs/aio: Add explicit RCU grace period when freeing kioctx")
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1087,8 +1087,8 @@ static struct kioctx *lookup_ioctx(unsig
+
+ ctx = rcu_dereference(table->table[id]);
+ if (ctx && ctx->user_id == ctx_id) {
+- percpu_ref_get(&ctx->users);
+- ret = ctx;
++ if (percpu_ref_tryget_live(&ctx->users))
++ ret = ctx;
+ }
+ out:
+ rcu_read_unlock();
--- /dev/null
+From 3ae180972564846e6d794e3615e1ab0a1e6c4ef9 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Date: Thu, 17 May 2018 22:34:39 +0100
+Subject: ALSA: timer: Fix pause event notification
+
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+
+commit 3ae180972564846e6d794e3615e1ab0a1e6c4ef9 upstream.
+
+Commit f65e0d299807 ("ALSA: timer: Call notifier in the same spinlock")
+combined the start/continue and stop/pause functions, and in doing so
+changed the event code for the pause case to SNDRV_TIMER_EVENT_CONTINUE.
+Change it back to SNDRV_TIMER_EVENT_PAUSE.
+
+Fixes: f65e0d299807 ("ALSA: timer: Call notifier in the same spinlock")
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/timer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_ti
+ else
+ timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+- SNDRV_TIMER_EVENT_CONTINUE);
++ SNDRV_TIMER_EVENT_PAUSE);
+ unlock:
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return result;
+@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct s
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+- SNDRV_TIMER_EVENT_CONTINUE);
++ SNDRV_TIMER_EVENT_PAUSE);
+ spin_unlock(&timeri->timer->lock);
+ }
+ spin_unlock_irqrestore(&slave_active_lock, flags);
--- /dev/null
+From 79f546a696bff2590169fb5684e23d65f4d9f591 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Fri, 11 May 2018 11:20:57 +1000
+Subject: fs: don't scan the inode cache before SB_BORN is set
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 79f546a696bff2590169fb5684e23d65f4d9f591 upstream.
+
+We recently had an oops reported on a 4.14 kernel in
+xfs_reclaim_inodes_count(): sb->s_fs_info pointed to garbage and so the
+m_perag_tree lookup walked into lala land. The oops was produced down
+this path during the failed mount:
+
+ radix_tree_gang_lookup_tag+0xc4/0x130
+ xfs_perag_get_tag+0x37/0xf0
+ xfs_reclaim_inodes_count+0x32/0x40
+ xfs_fs_nr_cached_objects+0x11/0x20
+ super_cache_count+0x35/0xc0
+ shrink_slab.part.66+0xb1/0x370
+ shrink_node+0x7e/0x1a0
+ try_to_free_pages+0x199/0x470
+ __alloc_pages_slowpath+0x3a1/0xd20
+ __alloc_pages_nodemask+0x1c3/0x200
+ cache_grow_begin+0x20b/0x2e0
+ fallback_alloc+0x160/0x200
+ kmem_cache_alloc+0x111/0x4e0
+
+The problem is that the superblock shrinker is running before the
+filesystem structures it depends on have been fully set up, i.e. the
+shrinker is registered in sget(), before ->fill_super() has been called,
+and so the shrinker can call into the filesystem before fill_super() does
+its setup work. Essentially we are exposed to both use-after-free and
+use-before-initialisation bugs here.
+
+To fix this, add a check for the SB_BORN flag in super_cache_count().
+In general, this flag is not set until mount_fs() completes successfully,
+so we know that it is set only after the filesystem setup has completed.
+This matches the trylock_super() behaviour, which will not let
+super_cache_scan() run if SB_BORN is not set, and hence will not allow
+the superblock shrinker to enter the filesystem while it is being set up
+or after it has failed setup and is being torn down.
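+
+In sketch form (taken from the hunks below), this is the classic
+publish/subscribe barrier pairing:
+
+	/* writer side, mount_fs(): publish the fully built superblock */
+	smp_wmb();
+	sb->s_flags |= SB_BORN;
+
+	/* reader side, super_cache_count(): don't touch until published */
+	if (!(sb->s_flags & SB_BORN))
+		return 0;
+	smp_rmb();	/* pairs with the smp_wmb() in mount_fs() */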
+
+Cc: stable@kernel.org
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/super.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -120,13 +120,23 @@ static unsigned long super_cache_count(s
+ sb = container_of(shrink, struct super_block, s_shrink);
+
+ /*
+- * Don't call trylock_super as it is a potential
+- * scalability bottleneck. The counts could get updated
+- * between super_cache_count and super_cache_scan anyway.
+- * Call to super_cache_count with shrinker_rwsem held
+- * ensures the safety of call to list_lru_shrink_count() and
+- * s_op->nr_cached_objects().
++ * We don't call trylock_super() here as it is a scalability bottleneck,
++ * so we're exposed to partial setup state. The shrinker rwsem does not
++ * protect filesystem operations backing list_lru_shrink_count() or
++ * s_op->nr_cached_objects(). Counts can change between
++ * super_cache_count and super_cache_scan, so we really don't need locks
++ * here.
++ *
++ * However, if we are currently mounting the superblock, the underlying
++ * filesystem might be in a state of partial construction and hence it
++ * is dangerous to access it. trylock_super() uses a SB_BORN check to
++ * avoid this situation, so do the same here. The memory barrier is
++ * matched with the one in mount_fs() as we don't hold locks here.
+ */
++ if (!(sb->s_flags & SB_BORN))
++ return 0;
++ smp_rmb();
++
+ if (sb->s_op && sb->s_op->nr_cached_objects)
+ total_objects = sb->s_op->nr_cached_objects(sb, sc);
+
+@@ -1232,6 +1242,14 @@ mount_fs(struct file_system_type *type,
+ sb = root->d_sb;
+ BUG_ON(!sb);
+ WARN_ON(!sb->s_bdi);
++
++ /*
++ * Write barrier is for super_cache_count(). We place it before setting
++ * SB_BORN as the data dependency between the two functions is the
++ * superblock structure contents that we just set up, not the SB_BORN
++ * flag.
++ */
++ smp_wmb();
+ sb->s_flags |= SB_BORN;
+
+ error = security_sb_kern_mount(sb, flags, secdata);
--- /dev/null
+From ba3696e94d9d590d9a7e55f68e81c25dba515191 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 14 May 2018 18:23:50 +0100
+Subject: KVM: Fix spelling mistake: "cop_unsuable" -> "cop_unusable"
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit ba3696e94d9d590d9a7e55f68e81c25dba515191 upstream.
+
+Trivial fix to spelling mistake in debugfs_entries text.
+
+Fixes: 669e846e6c4e ("KVM/MIPS32: MIPS arch specific APIs for KVM")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: kernel-janitors@vger.kernel.org
+Cc: <stable@vger.kernel.org> # 3.10+
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kvm/mips.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_en
+ { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
+ { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
+ { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
+- { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
++ { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+ { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
+ { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
+ { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
--- /dev/null
+From 55a2aa08b3af519a9693f99cdf7fa6d8b62d9f65 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neil@brown.name>
+Date: Fri, 27 Apr 2018 09:28:34 +1000
+Subject: MIPS: c-r4k: Fix data corruption related to cache coherence
+
+From: NeilBrown <neil@brown.name>
+
+commit 55a2aa08b3af519a9693f99cdf7fa6d8b62d9f65 upstream.
+
+When DMA is to be performed to a MIPS32 1004K CPS, the L1 cache for the
+range needs to be flushed and invalidated first.
+The code currently takes one of two approaches:
+1/ If the range is less than the size of the dcache, then HIT-type
+   requests flush/invalidate cache lines for the particular addresses.
+   HIT-type requests are globalised by the CPS, so this is safe on SMP.
+
+2/ If the range is larger than the size of the dcache, then INDEX-type
+   requests flush/invalidate the whole cache. INDEX-type requests affect
+   the local cache only; the CPS does not propagate them in any way, so
+   this invalidation is not safe on SMP CPS systems.
+
+Data corruption due to '2' can quite easily be demonstrated by
+repeatedly running "echo 3 > /proc/sys/vm/drop_caches" and then
+sha1summing a file that is several times the size of available memory.
+Dropping caches means that large contiguous extents (larger than the
+dcache) are more likely.
+
+This was not a problem before Linux-4.8 because option 2 was never used
+if CONFIG_MIPS_CPS was defined. The commit which removed that apparently
+didn't appreciate the full consequence of the change.
+
+We could, in theory, globalize the INDEX-based flush by sending an IPI
+to other cores. But these cache invalidation routines can be called with
+interrupts disabled, synchronous IPIs require interrupts to be enabled,
+and asynchronous IPIs may not trigger writeback soon enough. So we
+cannot use IPI in practice.
+
+We can already test if IPI would be needed for an INDEX operation with
+r4k_op_needs_ipi(R4K_INDEX). If this is true then we mustn't try the
+INDEX approach as we cannot use IPI. If this is false (e.g. when there
+is only one core and hence one L1 cache) then it is safe to use the
+INDEX approach without IPI.
+
+This patch avoids option 2 if r4k_op_needs_ipi(R4K_INDEX), and so
+eliminates the corruption.
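+
+In sketch form, the resulting guard in both DMA cache routines is
+(condensed from the hunks below; the HIT arm is simplified, and the real
+code also applies R4600_HIT_CACHEOP_WAR_IMPL first):
+
+	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size)
+		r4k_blast_dcache();	/* INDEX: local cache only, no IPI */
+	else
+		blast_dcache_range(addr, addr + size);	/* HIT: globalised */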
+
+Fixes: c00ab4896ed5 ("MIPS: Remove cpu_has_safe_index_cacheops")
+Signed-off-by: NeilBrown <neil@brown.name>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Paul Burton <paul.burton@mips.com>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # 4.8+
+Patchwork: https://patchwork.linux-mips.org/patch/19259/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/c-r4k.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsi
+ /*
+ * Either no secondary cache or the available caches don't have the
+ * subset property so we have to flush the primary caches
+- * explicitly
++ * explicitly.
++ * If we would need IPI to perform an INDEX-type operation, then
++ * we have to use the HIT-type alternative as IPI cannot be used
++ * here due to interrupts possibly being disabled.
+ */
+- if (size >= dcache_size) {
++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned l
+ return;
+ }
+
+- if (size >= dcache_size) {
++ if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
--- /dev/null
+From 9a3a92ccfe3620743d4ae57c987dc8e9c5f88996 Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@mips.com>
+Date: Mon, 14 May 2018 16:49:43 +0100
+Subject: MIPS: Fix ptrace(2) PTRACE_PEEKUSR and PTRACE_POKEUSR accesses to o32 FGRs
+
+From: Maciej W. Rozycki <macro@mips.com>
+
+commit 9a3a92ccfe3620743d4ae57c987dc8e9c5f88996 upstream.
+
+Check the TIF_32BIT_FPREGS task setting of the tracee rather than the
+tracer in determining the layout of floating-point general registers in
+the floating-point context, correcting access to odd-numbered registers
+for o32 tracees where the setting disagrees between the two processes.
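+
+The essence of the change, as a sketch (do_fr0_layout() is a hypothetical
+stand-in for the odd/even register packing done by the real code):
+
+	/* Before: tested the *tracer's* (current task's) FP mode: */
+	if (test_thread_flag(TIF_32BIT_FPREGS))
+		do_fr0_layout();
+
+	/* After: test the *tracee*, whose registers are being accessed: */
+	if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS))
+		do_fr0_layout();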
+
+Fixes: 597ce1723e0f ("MIPS: Support for 64-bit FP with O32 binaries")
+Signed-off-by: Maciej W. Rozycki <macro@mips.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # 3.14+
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/ptrace.c | 4 ++--
+ arch/mips/kernel/ptrace32.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -798,7 +798,7 @@ long arch_ptrace(struct task_struct *chi
+ fregs = get_fpu_regs(child);
+
+ #ifdef CONFIG_32BIT
+- if (test_thread_flag(TIF_32BIT_FPREGS)) {
++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+@@ -887,7 +887,7 @@ long arch_ptrace(struct task_struct *chi
+
+ init_fp_ctx(child);
+ #ifdef CONFIG_32BIT
+- if (test_thread_flag(TIF_32BIT_FPREGS)) {
++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -98,7 +98,7 @@ long compat_arch_ptrace(struct task_stru
+ break;
+ }
+ fregs = get_fpu_regs(child);
+- if (test_thread_flag(TIF_32BIT_FPREGS)) {
++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+@@ -205,7 +205,7 @@ long compat_arch_ptrace(struct task_stru
+ sizeof(child->thread.fpu));
+ child->thread.fpu.fcr31 = 0;
+ }
+- if (test_thread_flag(TIF_32BIT_FPREGS)) {
++ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
--- /dev/null
+From 71e909c0cdad28a1df1fa14442929e68615dee45 Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@mips.com>
+Date: Mon, 30 Apr 2018 15:56:47 +0100
+Subject: MIPS: ptrace: Expose FIR register through FP regset
+
+From: Maciej W. Rozycki <macro@mips.com>
+
+commit 71e909c0cdad28a1df1fa14442929e68615dee45 upstream.
+
+Correct commit 7aeb753b5353 ("MIPS: Implement task_user_regset_view.")
+and expose the FIR register using the unused 4 bytes at the end of the
+NT_PRFPREG regset. Without that register included, clients cannot use
+the PTRACE_GETREGSET request to retrieve the complete FPU register set
+and have to resort to one of the older interfaces, either PTRACE_PEEKUSR
+or PTRACE_GETFPREGS, to retrieve the missing piece of data. Also the
+register is irreversibly missing from core dumps.
+
+This register is architecturally hardwired and read-only, so the write
+path does not matter; data supplied on writes is therefore ignored.
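+
+A sketch of the resulting NT_PRFPREG layout, using the offset constants
+from the hunks below:
+
+	0 ... fcr31_pos - 1	fp0..fp31 (NUM_FPU_REGS elf_fpreg_t slots)
+	fcr31_pos		FCSR (u32)
+	fir_pos			FIR  (u32, read-only; supplied data ignored)
+
+where fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t) and
+fir_pos = fcr31_pos + sizeof(u32).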
+
+Fixes: 7aeb753b5353 ("MIPS: Implement task_user_regset_view.")
+Signed-off-by: Maciej W. Rozycki <macro@mips.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # 3.13+
+Patchwork: https://patchwork.linux-mips.org/patch/19273/
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/ptrace.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -454,7 +454,7 @@ static int fpr_get_msa(struct task_struc
+ /*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+- * the FCSR register separately.
++ * the FCSR and FIR registers separately.
+ */
+ static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+@@ -462,6 +462,7 @@ static int fpr_get(struct task_struct *t
+ void *kbuf, void __user *ubuf)
+ {
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
++ const int fir_pos = fcr31_pos + sizeof(u32);
+ int err;
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+@@ -474,6 +475,12 @@ static int fpr_get(struct task_struct *t
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcr31,
+ fcr31_pos, fcr31_pos + sizeof(u32));
++ if (err)
++ return err;
++
++ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++ &boot_cpu_data.fpu_id,
++ fir_pos, fir_pos + sizeof(u32));
+
+ return err;
+ }
+@@ -522,7 +529,8 @@ static int fpr_set_msa(struct task_struc
+ /*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+- * the FCSR register separately.
++ * the FCSR register separately. Ignore the incoming FIR register
++ * contents though, as the register is read-only.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+@@ -536,6 +544,7 @@ static int fpr_set(struct task_struct *t
+ const void *kbuf, const void __user *ubuf)
+ {
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
++ const int fir_pos = fcr31_pos + sizeof(u32);
+ u32 fcr31;
+ int err;
+
+@@ -563,6 +572,11 @@ static int fpr_set(struct task_struct *t
+ ptrace_setfcr31(target, fcr31);
+ }
+
++ if (count > 0)
++ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
++ fir_pos,
++ fir_pos + sizeof(u32));
++
+ return err;
+ }
+
+affs_lookup-close-a-race-with-affs_remove_link.patch
+fs-don-t-scan-the-inode-cache-before-sb_born-is-set.patch
+aio-fix-io_destroy-2-vs.-lookup_ioctx-race.patch
+alsa-timer-fix-pause-event-notification.patch