--- /dev/null
+From 8536a5ef886005bc443c2da9b842d69fd3d7647f Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Wed, 15 Dec 2021 09:31:36 +0100
+Subject: ARM: 9169/1: entry: fix Thumb2 bug in iWMMXt exception handling
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 8536a5ef886005bc443c2da9b842d69fd3d7647f upstream.
+
+The Thumb2 version of the FP exception handling entry code treats the
+register holding the CP number (R8) differently, resulting in the iWMMXt
+CP number check being incorrect.
+
+Fix this by unifying the ARM and Thumb2 code paths, and by switching the
+order of the additions of the TI_USED_CP offset and the shifted CP
+index.
+
+Cc: <stable@vger.kernel.org>
+Fixes: b86040a59feb ("Thumb-2: Implementation of the unified start-up and exceptions code")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/entry-armv.S | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -625,11 +625,9 @@ call_fpe:
+ tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
+ reteq lr
+ and r8, r0, #0x00000f00 @ mask out CP number
+- THUMB( lsr r8, r8, #8 )
+ mov r7, #1
+- add r6, r10, #TI_USED_CP
+- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
+- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
++ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
++ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
+ #ifdef CONFIG_IWMMXT
+ @ Test if we need to give access to iWMMXt coprocessors
+ ldr r5, [r10, #TI_FLAGS]
+@@ -638,7 +636,7 @@ call_fpe:
+ bcs iwmmxt_task_enable
+ #endif
+ ARM( add pc, pc, r8, lsr #6 )
+- THUMB( lsl r8, r8, #2 )
++ THUMB( lsr r8, r8, #6 )
+ THUMB( add pc, r8 )
+ nop
+
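A minimal, stand-alone C sketch of the broken check (illustrative only, not
kernel code; the names below are mine): it reproduces the arithmetic of the
"CP 0 or 1 only" test in call_fpe, which relies on R8 still holding the CP
number shifted left by 8 bits. On the ARM path (and on the unified path
after this fix) only CP0 and CP1 pass that test; on the old Thumb2 path,
where R8 had already been shifted down to the raw CP number, every
coprocessor passes and the iWMMXt-enable branch can be taken spuriously.

  #include <stdio.h>

  /* models the "CP 0 or 1 only" comparison against (1 << 8):
   * true (carry survives, no borrow) iff r8 <= 0x100 */
  static int cp_is_0_or_1(unsigned int r8)
  {
          return r8 <= (1u << 8);
  }

  int main(void)
  {
          unsigned int cp;

          for (cp = 0; cp < 16; cp++) {
                  unsigned int arm_r8 = cp << 8;  /* CP number still shifted left */
                  unsigned int old_t2_r8 = cp;    /* old Thumb2: already shifted down */

                  printf("CP%-2u  shifted r8: %d   pre-shifted r8: %d\n",
                         cp, cp_is_0_or_1(arm_r8), cp_is_0_or_1(old_t2_r8));
          }
          return 0;
  }

With the pre-shifted register the test is true for all sixteen CP numbers,
which is exactly the incorrect iWMMXt check the commit message describes.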
--- /dev/null
+From jgross@suse.com Mon Dec 27 13:38:43 2021
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 23 Dec 2021 11:53:08 +0100
+Subject: xen/blkfront: fix bug in backported patch
+To: stable@vger.kernel.org
+Cc: xen-devel@lists.xenproject.org, Juergen Gross <jgross@suse.com>
+Message-ID: <20211223105308.17077-1-jgross@suse.com>
+
+From: Juergen Gross <jgross@suse.com>
+
+The backport of commit 8f5a695d99000fc ("xen/blkfront: don't take local
+copy of a request from the ring page") to the stable 4.4 kernel introduced
+a bug when adding the needed blkif_ring_get_request() function, as
+info->ring.req_prod_pvt is now incremented twice.
+
+Fix that by deleting the now superfluous increments after calling that
+function.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkfront.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -493,8 +493,6 @@ static int blkif_queue_discard_req(struc
+ else
+ ring_req->u.discard.flag = 0;
+
+- info->ring.req_prod_pvt++;
+-
+ /* Copy the request to the ring page. */
+ *final_ring_req = *ring_req;
+ info->shadow[id].inflight = true;
+@@ -711,8 +709,6 @@ static int blkif_queue_rw_req(struct req
+ if (setup.segments)
+ kunmap_atomic(setup.segments);
+
+- info->ring.req_prod_pvt++;
+-
+ /* Copy request(s) to the ring page. */
+ *final_ring_req = *ring_req;
+ info->shadow[id].inflight = true;
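
The double increment is easy to model outside the driver. The sketch below
is illustrative only (the structures are trimmed to the one field that
matters and the helper's real signature differs): the backported
blkif_ring_get_request() already advances the private producer index, so
keeping the old explicit increment after the call moves req_prod_pvt by two
for a single queued request and desynchronises the ring bookkeeping.

  #include <stdio.h>

  struct blkif_front_ring { unsigned int req_prod_pvt; };
  struct blkfront_info { struct blkif_front_ring ring; };

  /* trimmed model of the backported helper: it bumps req_prod_pvt itself */
  static unsigned long blkif_ring_get_request(struct blkfront_info *info)
  {
          unsigned long id = info->ring.req_prod_pvt;

          info->ring.req_prod_pvt++;
          return id;
  }

  int main(void)
  {
          struct blkfront_info info = { .ring = { .req_prod_pvt = 0 } };

          blkif_ring_get_request(&info);
          info.ring.req_prod_pvt++;      /* the increment this patch removes */

          printf("req_prod_pvt = %u after one request (expected 1)\n",
                 info.ring.req_prod_pvt);
          return 0;
  }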