git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.14
author Sasha Levin <sashal@kernel.org>
Thu, 8 Dec 2022 13:53:37 +0000 (08:53 -0500)
committer Sasha Levin <sashal@kernel.org>
Thu, 8 Dec 2022 13:53:37 +0000 (08:53 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
16 files changed:
queue-4.14/alsa-seq-fix-function-prototype-mismatch-in-snd_seq_.patch [new file with mode: 0644]
queue-4.14/arm-9251-1-perf-fix-stacktraces-for-tracepoint-event.patch [new file with mode: 0644]
queue-4.14/arm-9266-1-mm-fix-no-mmu-zero_page-implementation.patch [new file with mode: 0644]
queue-4.14/arm-dts-rockchip-disable-arm_global_timer-on-rk3066-.patch [new file with mode: 0644]
queue-4.14/arm-dts-rockchip-fix-ir-receiver-node-names.patch [new file with mode: 0644]
queue-4.14/arm-dts-rockchip-fix-node-name-for-hym8563-rtc.patch [new file with mode: 0644]
queue-4.14/asoc-soc-pcm-add-null-check-in-be-reparenting.patch [new file with mode: 0644]
queue-4.14/mm-khugepaged-fix-gup-fast-interaction-by-sending-ip.patch [new file with mode: 0644]
queue-4.14/mm-khugepaged-invoke-mmu-notifiers-in-shmem-file-col.patch [new file with mode: 0644]
queue-4.14/net-usb-qmi_wwan-add-u-blox-0x1342-composition.patch [new file with mode: 0644]
queue-4.14/rcutorture-automatically-create-initrd-directory.patch [new file with mode: 0644]
queue-4.14/regulator-twl6030-fix-get-status-of-twl6032-regulato.patch [new file with mode: 0644]
queue-4.14/series [new file with mode: 0644]
queue-4.14/xen-netback-do-some-code-cleanup.patch [new file with mode: 0644]
queue-4.14/xen-netback-don-t-call-kfree_skb-with-interrupts-dis.patch [new file with mode: 0644]
queue-4.14/xen-netback-ensure-protocol-headers-don-t-fall-in-th.patch [new file with mode: 0644]

diff --git a/queue-4.14/alsa-seq-fix-function-prototype-mismatch-in-snd_seq_.patch b/queue-4.14/alsa-seq-fix-function-prototype-mismatch-in-snd_seq_.patch
new file mode 100644 (file)
index 0000000..0a61b16
--- /dev/null
@@ -0,0 +1,77 @@
+From b3fb0e8127ab366cac7c679b18c73a3ebf467625 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Nov 2022 15:23:50 -0800
+Subject: ALSA: seq: Fix function prototype mismatch in
+ snd_seq_expand_var_event
+
+From: Kees Cook <keescook@chromium.org>
+
+[ Upstream commit 05530ef7cf7c7d700f6753f058999b1b5099a026 ]
+
+With clang's kernel control flow integrity (kCFI, CONFIG_CFI_CLANG),
+indirect call targets are validated against the expected function
+pointer prototype to make sure the call target is valid to help mitigate
+ROP attacks. If they are not identical, there is a failure at run time,
+which manifests as either a kernel panic or thread getting killed.
+
+seq_copy_in_user() and seq_copy_in_kernel() did not have prototypes
+matching snd_seq_dump_func_t. Adjust this and remove the casts. There
+are no resulting binary output differences.
+
+This was found as a result of Clang's new -Wcast-function-type-strict
+flag, which is more sensitive than the simpler -Wcast-function-type,
+which only checks for type width mismatches.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Link: https://lore.kernel.org/lkml/202211041527.HD8TLSE1-lkp@intel.com
+Cc: Jaroslav Kysela <perex@perex.cz>
+Cc: Takashi Iwai <tiwai@suse.com>
+Cc: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Cc: alsa-devel@alsa-project.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20221118232346.never.380-kees@kernel.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/seq/seq_memory.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index ab1112e90f88..6f413c711535 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -126,15 +126,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
+  * expand the variable length event to linear buffer space.
+  */
+-static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
++static int seq_copy_in_kernel(void *ptr, void *src, int size)
+ {
++      char **bufptr = ptr;
++
+       memcpy(*bufptr, src, size);
+       *bufptr += size;
+       return 0;
+ }
+-static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
++static int seq_copy_in_user(void *ptr, void *src, int size)
+ {
++      char __user **bufptr = ptr;
++
+       if (copy_to_user(*bufptr, src, size))
+               return -EFAULT;
+       *bufptr += size;
+@@ -163,8 +167,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
+               return newlen;
+       }
+       err = snd_seq_dump_var_event(event,
+-                                   in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
+-                                   (snd_seq_dump_func_t)seq_copy_in_user,
++                                   in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
+                                    &buf);
+       return err < 0 ? err : newlen;
+ }
+-- 
+2.35.1
+
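For readers unfamiliar with kCFI, here is a stand-alone sketch of the pattern the commit message above describes: an indirect call through a cast function pointer whose prototype differs from the callee's, versus a callback whose prototype matches the expected type exactly and recovers the real pointer type internally. This is hypothetical user-space code, not the ALSA sources; the callback typedef only mimics snd_seq_dump_func_t.

```c
#include <stdio.h>
#include <string.h>

/* Callback type the caller expects (stand-in for snd_seq_dump_func_t). */
typedef int (*dump_func_t)(void *ptr, void *src, int size);

/* BAD: prototype differs from dump_func_t (char ** / const void *). */
static int copy_bad(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

/* GOOD: matches dump_func_t exactly; the real type is recovered inside. */
static int copy_good(void *ptr, void *src, int size)
{
	char **bufptr = ptr;

	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int dump(dump_func_t fn, void *ctx, void *src, int size)
{
	return fn(ctx, src, size);	/* indirect call site checked by kCFI */
}

int main(void)
{
	char out[16], *cursor = out;

	/* With CONFIG_CFI_CLANG the cast below traps at the indirect call;
	 * -Wcast-function-type-strict flags it at compile time. */
	dump((dump_func_t)copy_bad, &cursor, "abc", 3);
	dump(copy_good, &cursor, "def", 3);
	printf("%.6s\n", out);		/* "abcdef" */
	return 0;
}
```

Compiled with clang and -Wcast-function-type-strict, the cast in the first call should be flagged, which matches how the upstream issue was found.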
diff --git a/queue-4.14/arm-9251-1-perf-fix-stacktraces-for-tracepoint-event.patch b/queue-4.14/arm-9251-1-perf-fix-stacktraces-for-tracepoint-event.patch
new file mode 100644 (file)
index 0000000..ceaa273
--- /dev/null
@@ -0,0 +1,70 @@
+From 94287a7363f16a84392cc5f43cb9b0ec34600fcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Sep 2022 16:09:12 +0100
+Subject: ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2
+ kernels
+
+From: Tomislav Novak <tnovak@fb.com>
+
+[ Upstream commit 612695bccfdbd52004551308a55bae410e7cd22f ]
+
+Store the frame address where arm_get_current_stackframe() looks for it
+(ARM_r7 instead of ARM_fp if CONFIG_THUMB2_KERNEL=y). Otherwise frame->fp
+gets set to 0, causing unwind_frame() to fail.
+
+  # bpftrace -e 't:sched:sched_switch { @[kstack] = count(); exit(); }'
+  Attaching 1 probe...
+  @[
+      __schedule+1059
+  ]: 1
+
+A typical first unwind instruction is 0x97 (SP = R7), so after executing
+it SP ends up being 0 and -URC_FAILURE is returned.
+
+  unwind_frame(pc = ac9da7d7 lr = 00000000 sp = c69bdda0 fp = 00000000)
+  unwind_find_idx(ac9da7d7)
+  unwind_exec_insn: insn = 00000097
+  unwind_exec_insn: fp = 00000000 sp = 00000000 lr = 00000000 pc = 00000000
+
+With this patch:
+
+  # bpftrace -e 't:sched:sched_switch { @[kstack] = count(); exit(); }'
+  Attaching 1 probe...
+  @[
+      __schedule+1059
+      __schedule+1059
+      schedule+79
+      schedule_hrtimeout_range_clock+163
+      schedule_hrtimeout_range+17
+      ep_poll+471
+      SyS_epoll_wait+111
+      sys_epoll_pwait+231
+      __ret_fast_syscall+1
+  ]: 1
+
+Link: https://lore.kernel.org/r/20220920230728.2617421-1-tnovak@fb.com/
+
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Tomislav Novak <tnovak@fb.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/include/asm/perf_event.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
+index 4f9dec489931..c5d27140834e 100644
+--- a/arch/arm/include/asm/perf_event.h
++++ b/arch/arm/include/asm/perf_event.h
+@@ -21,7 +21,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
+ #define perf_arch_fetch_caller_regs(regs, __ip) { \
+       (regs)->ARM_pc = (__ip); \
+-      (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
++      frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
+       (regs)->ARM_sp = current_stack_pointer; \
+       (regs)->ARM_cpsr = SVC_MODE; \
+ }
+-- 
+2.35.1
+
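The one-line change above works because the kernel's frame_pointer() accessor resolves to r7 on THUMB2 builds and to fp (r11) otherwise, and the unwinder reads whichever register the configuration selects. The sketch below is a simplified user-space illustration of that selection; the struct and the macro are stand-ins, not the real arch/arm definitions.

```c
#include <stdio.h>

struct pt_regs_sketch {
	unsigned long r7;	/* THUMB2 frame pointer */
	unsigned long fp;	/* ARM (r11) frame pointer */
	unsigned long sp;
	unsigned long pc;
};

/* Stand-in for the kernel accessor: pick the register the unwinder reads. */
#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs)	((regs)->r7)
#else
#define frame_pointer(regs)	((regs)->fp)
#endif

/* Writing __builtin_frame_address(0) into ->fp unconditionally (the old code)
 * leaves ->r7 at 0 on a THUMB2 build, so unwinding stops after one frame --
 * the truncated bpftrace stack shown in the commit message. */
static void fetch_caller_regs(struct pt_regs_sketch *regs)
{
	frame_pointer(regs) = (unsigned long)__builtin_frame_address(0);
	regs->pc = (unsigned long)__builtin_return_address(0);
}

int main(void)
{
	struct pt_regs_sketch regs = { 0 };

	fetch_caller_regs(&regs);
	printf("frame pointer slot = %#lx, pc = %#lx\n",
	       frame_pointer(&regs), regs.pc);
	return 0;
}
```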
diff --git a/queue-4.14/arm-9266-1-mm-fix-no-mmu-zero_page-implementation.patch b/queue-4.14/arm-9266-1-mm-fix-no-mmu-zero_page-implementation.patch
new file mode 100644 (file)
index 0000000..a47d84f
--- /dev/null
@@ -0,0 +1,141 @@
+From 01374cb769582d7f931f29706cfa97b81c1d65cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Nov 2022 21:46:18 +0100
+Subject: ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation
+
+From: Giulio Benetti <giulio.benetti@benettiengineering.com>
+
+[ Upstream commit 340a982825f76f1cff0daa605970fe47321b5ee7 ]
+
+On no-MMU SoCs (e.g. i.MXRT), ZERO_PAGE(vaddr) expands to
+```
+virt_to_page(0)
+```
+which in turn expands to:
+```
+pfn_to_page(virt_to_pfn(0))
+```
+and then virt_to_pfn(0) to:
+```
+        ((((unsigned long)(0) - PAGE_OFFSET) >> PAGE_SHIFT) +
+         PHYS_PFN_OFFSET)
+```
+where PAGE_OFFSET and PHYS_PFN_OFFSET are the DRAM offset (0x80000000) and
+PAGE_SHIFT is 12. This way we obtain 16MB (0x01000000) added to the base of
+DRAM (0x80000000).
+When ZERO_PAGE(0) is then used, for example in bio_add_page(), the page
+gets an address that is out of DRAM bounds.
+So instead of using the fake virtual page 0, let's allocate a dedicated
+zero_page during paging_init() and assign it to a global 'struct page *
+empty_zero_page', the same way mmu.c does; this is also the approach used
+on m68k in commit dc068f462179, as discussed here[0]. Then let's move the
+ZERO_PAGE() definition to the top of pgtable.h so that it is shared between
+mmu.c and nommu.c.
+
+[0]: https://lore.kernel.org/linux-m68k/2a462b23-5b8e-bbf4-ec7d-778434a3b9d7@google.com/T/#m1266ceb63
+ad140743174d6b3070364d3c9a5179b
+
+Signed-off-by: Giulio Benetti <giulio.benetti@benettiengineering.com>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/include/asm/pgtable-nommu.h |  6 ------
+ arch/arm/include/asm/pgtable.h       | 16 +++++++++-------
+ arch/arm/mm/nommu.c                  | 19 +++++++++++++++++++
+ 3 files changed, 28 insertions(+), 13 deletions(-)
+
+diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
+index a0d726a47c8a..e7ca798513c1 100644
+--- a/arch/arm/include/asm/pgtable-nommu.h
++++ b/arch/arm/include/asm/pgtable-nommu.h
+@@ -54,12 +54,6 @@
+ typedef pte_t *pte_addr_t;
+-/*
+- * ZERO_PAGE is a global shared page that is always zero: used
+- * for zero-mapped memory areas etc..
+- */
+-#define ZERO_PAGE(vaddr)      (virt_to_page(0))
+-
+ /*
+  * Mark the prot value as uncacheable and unbufferable.
+  */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 1c462381c225..ce89af40651d 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -13,6 +13,15 @@
+ #include <linux/const.h>
+ #include <asm/proc-fns.h>
++#ifndef __ASSEMBLY__
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++extern struct page *empty_zero_page;
++#define ZERO_PAGE(vaddr)      (empty_zero_page)
++#endif
++
+ #ifndef CONFIG_MMU
+ #include <asm-generic/4level-fixup.h>
+@@ -166,13 +175,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ #define __S111  __PAGE_SHARED_EXEC
+ #ifndef __ASSEMBLY__
+-/*
+- * ZERO_PAGE is a global shared page that is always zero: used
+- * for zero-mapped memory areas etc..
+- */
+-extern struct page *empty_zero_page;
+-#define ZERO_PAGE(vaddr)      (empty_zero_page)
+-
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index 91537d90f5f5..d58dd8af2cf0 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -25,6 +25,13 @@
+ unsigned long vectors_base;
++/*
++ * empty_zero_page is a special page that is used for
++ * zero-initialized data and COW.
++ */
++struct page *empty_zero_page;
++EXPORT_SYMBOL(empty_zero_page);
++
+ #ifdef CONFIG_ARM_MPU
+ struct mpu_rgn_info mpu_rgn_info;
+@@ -366,9 +373,21 @@ void __init adjust_lowmem_bounds(void)
+  */
+ void __init paging_init(const struct machine_desc *mdesc)
+ {
++      void *zero_page;
++
+       early_trap_init((void *)vectors_base);
+       mpu_setup();
++
++      /* allocate the zero page. */
++      zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++      if (!zero_page)
++              panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
++                    __func__, PAGE_SIZE, PAGE_SIZE);
++
+       bootmem_init();
++
++      empty_zero_page = virt_to_page(zero_page);
++      flush_dcache_page(empty_zero_page);
+ }
+ /*
+-- 
+2.35.1
+
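The bogus page-frame arithmetic described in the commit message can be re-done with ordinary 32-bit integer math. The sketch below only reproduces that arithmetic with the constants quoted above (DRAM base 0x80000000, 4 KiB pages) plus an assumed 32-byte struct page for the mem_map offset; it is illustrative, not kernel code.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000u		/* DRAM base quoted above */
#define PHYS_PFN_OFFSET	(PAGE_OFFSET >> PAGE_SHIFT)

/* 32-bit arithmetic, as on the no-MMU ARM target. */
static uint32_t virt_to_pfn(uint32_t vaddr)
{
	return ((vaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
}

int main(void)
{
	uint32_t pfn = virt_to_pfn(0);	/* what the old ZERO_PAGE(vaddr) used */

	printf("virt_to_pfn(0) = %#x\n", pfn);			/* 0x100000 */
	printf("DRAM base pfn  = %#x\n", (uint32_t)PHYS_PFN_OFFSET);	/* 0x80000 */

	/* With a flat mem_map and an assumed 32-byte struct page, the entry
	 * sits 16 MiB past the start of mem_map -- the 0x01000000 offset the
	 * commit message mentions -- i.e. far outside the DRAM window. */
	printf("mem_map offset = %#x bytes\n", (pfn - PHYS_PFN_OFFSET) * 32);
	return 0;
}
```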
diff --git a/queue-4.14/arm-dts-rockchip-disable-arm_global_timer-on-rk3066-.patch b/queue-4.14/arm-dts-rockchip-disable-arm_global_timer-on-rk3066-.patch
new file mode 100644 (file)
index 0000000..82edd35
--- /dev/null
@@ -0,0 +1,64 @@
+From 64973bb120fd94528913a1574e390f5540532ccf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Oct 2022 21:56:29 +0100
+Subject: ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188
+
+From: Johan Jonker <jbx6244@gmail.com>
+
+[ Upstream commit da74858a475782a3f16470907814c8cc5950ad68 ]
+
+The clock source and the sched_clock provided by the arm_global_timer
+on Rockchip rk3066a/rk3188 are quite unstable because their rates
+depend on the CPU frequency.
+
+Recent changes to the arm_global_timer driver make it impossible to use.
+
+On the other hand, the arm_global_timer has a higher rating than the
+ROCKCHIP_TIMER, so it will be selected by default by the timer framework,
+while we want to use the stable Rockchip clock source.
+
+Keep the arm_global_timer disabled in order to have the
+DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
+
+Signed-off-by: Johan Jonker <jbx6244@gmail.com>
+Link: https://lore.kernel.org/r/f275ca8d-fd0a-26e5-b978-b7f3df815e0a@gmail.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/rk3188.dtsi | 1 -
+ arch/arm/boot/dts/rk3xxx.dtsi | 7 +++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
+index 74eb1dfa2f6c..3689a23a1bca 100644
+--- a/arch/arm/boot/dts/rk3188.dtsi
++++ b/arch/arm/boot/dts/rk3188.dtsi
+@@ -546,7 +546,6 @@
+ &global_timer {
+       interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+-      status = "disabled";
+ };
+ &local_timer {
+diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
+index 4aa6f60d6a22..5f9950704f13 100644
+--- a/arch/arm/boot/dts/rk3xxx.dtsi
++++ b/arch/arm/boot/dts/rk3xxx.dtsi
+@@ -134,6 +134,13 @@
+               reg = <0x1013c200 0x20>;
+               interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
+               clocks = <&cru CORE_PERI>;
++              status = "disabled";
++              /* The clock source and the sched_clock provided by the arm_global_timer
++               * on Rockchip rk3066a/rk3188 are quite unstable because their rates
++               * depend on the CPU frequency.
++               * Keep the arm_global_timer disabled in order to have the
++               * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
++               */
+       };
+       local_timer: local-timer@1013c600 {
+-- 
+2.35.1
+
diff --git a/queue-4.14/arm-dts-rockchip-fix-ir-receiver-node-names.patch b/queue-4.14/arm-dts-rockchip-fix-ir-receiver-node-names.patch
new file mode 100644 (file)
index 0000000..b1d8dd9
--- /dev/null
@@ -0,0 +1,36 @@
+From 090ed6059586aefeb6b4fba56a1060ad2a332a92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Oct 2022 10:58:22 +0200
+Subject: ARM: dts: rockchip: fix ir-receiver node names
+
+From: Johan Jonker <jbx6244@gmail.com>
+
+[ Upstream commit dd847fe34cdf1e89afed1af24986359f13082bfb ]
+
+Fix ir-receiver node names on Rockchip boards,
+so that they match with regex: '^ir(-receiver)?(@[a-f0-9]+)?$'
+
+Signed-off-by: Johan Jonker <jbx6244@gmail.com>
+Link: https://lore.kernel.org/r/ea5af279-f44c-afea-023d-bb37f5a0d58d@gmail.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/rk3188-radxarock.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
+index 541a798d3d20..3844fbd84ba7 100644
+--- a/arch/arm/boot/dts/rk3188-radxarock.dts
++++ b/arch/arm/boot/dts/rk3188-radxarock.dts
+@@ -104,7 +104,7 @@
+               #sound-dai-cells = <0>;
+       };
+-      ir_recv: gpio-ir-receiver {
++      ir_recv: ir-receiver {
+               compatible = "gpio-ir-receiver";
+               gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+               pinctrl-names = "default";
+-- 
+2.35.1
+
diff --git a/queue-4.14/arm-dts-rockchip-fix-node-name-for-hym8563-rtc.patch b/queue-4.14/arm-dts-rockchip-fix-node-name-for-hym8563-rtc.patch
new file mode 100644 (file)
index 0000000..94845c2
--- /dev/null
@@ -0,0 +1,91 @@
+From 8e010cefdccf2afa9220fc1f9686a348880f020a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Oct 2022 18:55:46 +0200
+Subject: arm: dts: rockchip: fix node name for hym8563 rtc
+
+From: Sebastian Reichel <sebastian.reichel@collabora.com>
+
+[ Upstream commit 17b57beafccb4569accbfc8c11390744cf59c021 ]
+
+Fix the node name for hym8563 in all arm rockchip devicetrees.
+
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Link: https://lore.kernel.org/r/20221024165549.74574-4-sebastian.reichel@collabora.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/rk3036-evb.dts          | 2 +-
+ arch/arm/boot/dts/rk3288-evb-act8846.dts  | 2 +-
+ arch/arm/boot/dts/rk3288-firefly.dtsi     | 2 +-
+ arch/arm/boot/dts/rk3288-miqi.dts         | 2 +-
+ arch/arm/boot/dts/rk3288-rock2-square.dts | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
+index c0953410121b..41309de6c91d 100644
+--- a/arch/arm/boot/dts/rk3036-evb.dts
++++ b/arch/arm/boot/dts/rk3036-evb.dts
+@@ -69,7 +69,7 @@
+ &i2c1 {
+       status = "okay";
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
+index b9418d170502..e5231ecb8279 100644
+--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
++++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
+@@ -91,7 +91,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+-      hym8563@51 {
++      rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
+index b9e6f3a97240..5b14e9105070 100644
+--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
++++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
+@@ -270,7 +270,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
+index 4d923aa6ed11..2fd39bbf0b01 100644
+--- a/arch/arm/boot/dts/rk3288-miqi.dts
++++ b/arch/arm/boot/dts/rk3288-miqi.dts
+@@ -183,7 +183,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
+index 0e084b8a86ac..6011b117ab68 100644
+--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
++++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
+@@ -177,7 +177,7 @@
+ };
+ &i2c0 {
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+-- 
+2.35.1
+
diff --git a/queue-4.14/asoc-soc-pcm-add-null-check-in-be-reparenting.patch b/queue-4.14/asoc-soc-pcm-add-null-check-in-be-reparenting.patch
new file mode 100644 (file)
index 0000000..1dc6ab2
--- /dev/null
@@ -0,0 +1,37 @@
+From 5ba1ac88c85bda68782432cbb83ae9c0135ce695 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Nov 2022 12:01:13 +0530
+Subject: ASoC: soc-pcm: Add NULL check in BE reparenting
+
+From: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+
+[ Upstream commit db8f91d424fe0ea6db337aca8bc05908bbce1498 ]
+
+Add a NULL check in the dpcm_be_reparent API to handle a
+kernel NULL pointer dereference error.
+The issue occurred during fuzzing tests.
+
+Signed-off-by: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
+Link: https://lore.kernel.org/r/1669098673-29703-1-git-send-email-quic_srivasam@quicinc.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/soc-pcm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index e995e96ab903..3a9c875534c1 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1168,6 +1168,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
+               return;
+       be_substream = snd_soc_dpcm_get_substream(be, stream);
++      if (!be_substream)
++              return;
+       list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+               if (dpcm->fe == fe)
+-- 
+2.35.1
+
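The fix above is the standard defensive pattern for lookup helpers that may legitimately return NULL (here, a backend with no substream for the requested direction): check before dereferencing and bail out early. A generic sketch with hypothetical names, not the actual ASoC API:

```c
#include <stdio.h>
#include <stddef.h>

struct substream { int active; };

static struct substream playback = { 1 };

/* Returns NULL when the backend has no substream for this direction. */
static struct substream *get_substream(int stream)
{
	return stream == 0 ? &playback : NULL;
}

static void reparent(int stream)
{
	struct substream *s = get_substream(stream);

	if (!s)			/* the guard the patch adds */
		return;

	s->active++;		/* only reached when s is valid */
}

int main(void)
{
	reparent(0);	/* substream exists */
	reparent(1);	/* NULL: handled instead of crashing */
	puts("no NULL dereference");
	return 0;
}
```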
diff --git a/queue-4.14/mm-khugepaged-fix-gup-fast-interaction-by-sending-ip.patch b/queue-4.14/mm-khugepaged-fix-gup-fast-interaction-by-sending-ip.patch
new file mode 100644 (file)
index 0000000..d21dfcc
--- /dev/null
@@ -0,0 +1,97 @@
+From 165bce335b9ddb3d8c8d79c4977184bd34631720 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Dec 2022 18:16:02 +0100
+Subject: mm/khugepaged: fix GUP-fast interaction by sending IPI
+
+From: Jann Horn <jannh@google.com>
+
+commit 2ba99c5e08812494bc57f319fb562f527d9bacd8 upstream.
+
+Since commit 70cbc3cc78a99 ("mm: gup: fix the fast GUP race against THP
+collapse"), the lockless_pages_from_mm() fastpath rechecks the pmd_t to
+ensure that the page table was not removed by khugepaged in between.
+
+However, lockless_pages_from_mm() still requires that the page table is
+not concurrently freed.  Fix it by sending IPIs (if the architecture uses
+semi-RCU-style page table freeing) before freeing/reusing page tables.
+
+Link: https://lkml.kernel.org/r/20221129154730.2274278-2-jannh@google.com
+Link: https://lkml.kernel.org/r/20221128180252.1684965-2-jannh@google.com
+Link: https://lkml.kernel.org/r/20221125213714.4115729-2-jannh@google.com
+Fixes: ba76149f47d8 ("thp: khugepaged")
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[manual backport: two of the three places in khugepaged that can free
+ptes were refactored into a common helper between 5.15 and 6.0;
+TLB flushing was refactored between 5.4 and 5.10;
+TLB flushing was refactored between 4.19 and 5.4;
+pmd collapse for PTE-mapped THP was only added in 5.4]
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/asm-generic/tlb.h | 6 ++++++
+ mm/khugepaged.c           | 2 ++
+ mm/memory.c               | 5 +++++
+ 3 files changed, 13 insertions(+)
+
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index 5e7e4aaf36c5..43409a047480 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -60,6 +60,12 @@ struct mmu_table_batch {
+ extern void tlb_table_flush(struct mmu_gather *tlb);
+ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
++void tlb_remove_table_sync_one(void);
++
++#else
++
++static inline void tlb_remove_table_sync_one(void) { }
++
+ #endif
+ /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index f426d42d629d..f67c02010add 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1046,6 +1046,7 @@ static void collapse_huge_page(struct mm_struct *mm,
+       _pmd = pmdp_collapse_flush(vma, address, pmd);
+       spin_unlock(pmd_ptl);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
++      tlb_remove_table_sync_one();
+       spin_lock(pte_ptl);
+       isolated = __collapse_huge_page_isolate(vma, address, pte);
+@@ -1295,6 +1296,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+                               _pmd = pmdp_collapse_flush(vma, addr, pmd);
+                               spin_unlock(ptl);
+                               atomic_long_dec(&mm->nr_ptes);
++                              tlb_remove_table_sync_one();
+                               pte_free(mm, pmd_pgtable(_pmd));
+                       }
+                       up_write(&mm->mmap_sem);
+diff --git a/mm/memory.c b/mm/memory.c
+index 615cb3fe763d..0136af15ba18 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -373,6 +373,11 @@ static void tlb_remove_table_smp_sync(void *arg)
+       /* Simply deliver the interrupt */
+ }
++void tlb_remove_table_sync_one(void)
++{
++      smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
++}
++
+ static void tlb_remove_table_one(void *table)
+ {
+       /*
+-- 
+2.35.1
+
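The key property the fix relies on is that a synchronous no-op IPI cannot complete while any CPU is still inside a GUP-fast walk with interrupts disabled, so returning from the broadcast proves every walk that might still see the old page table has finished. Below is a rough user-space analogue of that idea, with per-thread mutexes standing in for disabled interrupts; it is purely illustrative, not kernel code.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_READERS 4

struct table { int data[16]; };

static _Atomic(struct table *) current_table;
static pthread_mutex_t walk_lock[NR_READERS];	/* "interrupts disabled" */
static atomic_int stop;

static void *reader(void *arg)		/* lockless walker analogue */
{
	long id = (long)arg;

	while (!atomic_load(&stop)) {
		pthread_mutex_lock(&walk_lock[id]);	/* enter the walk */
		struct table *t = atomic_load(&current_table);
		(void)t->data[0];			/* safe: freer waits */
		pthread_mutex_unlock(&walk_lock[id]);	/* leave the walk */
	}
	return NULL;
}

/* tlb_remove_table_sync_one() analogue: completes only after every reader
 * has left whatever walk it was in when the old table was unhooked. */
static void sync_all_readers(void)
{
	for (int i = 0; i < NR_READERS; i++) {
		pthread_mutex_lock(&walk_lock[i]);
		pthread_mutex_unlock(&walk_lock[i]);
	}
}

int main(void)
{
	pthread_t tid[NR_READERS];

	atomic_store(&current_table, (struct table *)calloc(1, sizeof(struct table)));
	for (long i = 0; i < NR_READERS; i++) {
		pthread_mutex_init(&walk_lock[i], NULL);
		pthread_create(&tid[i], NULL, reader, (void *)i);
	}

	/* "Collapse": unhook the old table, publish a new one... */
	struct table *old = atomic_exchange(&current_table,
			(struct table *)calloc(1, sizeof(struct table)));
	/* ...and free it only after the IPI-style synchronisation. */
	sync_all_readers();
	free(old);

	atomic_store(&stop, 1);
	for (int i = 0; i < NR_READERS; i++)
		pthread_join(tid[i], NULL);
	puts("old table freed safely after sync");
	return 0;
}
```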
diff --git a/queue-4.14/mm-khugepaged-invoke-mmu-notifiers-in-shmem-file-col.patch b/queue-4.14/mm-khugepaged-invoke-mmu-notifiers-in-shmem-file-col.patch
new file mode 100644 (file)
index 0000000..7763bdc
--- /dev/null
@@ -0,0 +1,72 @@
+From ce9077a52b4724eecb73b6c1e7bc2a4d3c11b996 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Dec 2022 18:16:03 +0100
+Subject: mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
+
+From: Jann Horn <jannh@google.com>
+
+commit f268f6cf875f3220afc77bdd0bf1bb136eb54db9 upstream.
+
+Any codepath that zaps page table entries must invoke MMU notifiers to
+ensure that secondary MMUs (like KVM) don't keep accessing pages which
+aren't mapped anymore.  Secondary MMUs don't hold their own references to
+pages that are mirrored over, so failing to notify them can lead to page
+use-after-free.
+
+I'm marking this as addressing an issue introduced in commit f3f0e1d2150b
+("khugepaged: add support of collapse for tmpfs/shmem pages"), but most of
+the security impact of this only came in commit 27e1f8273113 ("khugepaged:
+enable collapse pmd for pte-mapped THP"), which actually omitted flushes
+for the removal of present PTEs, not just for the removal of empty page
+tables.
+
+Link: https://lkml.kernel.org/r/20221129154730.2274278-3-jannh@google.com
+Link: https://lkml.kernel.org/r/20221128180252.1684965-3-jannh@google.com
+Link: https://lkml.kernel.org/r/20221125213714.4115729-3-jannh@google.com
+Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages")
+Signed-off-by: Jann Horn <jannh@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[manual backport: this code was refactored from two copies into a common
+helper between 5.15 and 6.0;
+pmd collapse for PTE-mapped THP was only added in 5.4;
+MMU notifier API changed between 4.19 and 5.4]
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/khugepaged.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index f67c02010add..7ad88b9e5a65 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1291,13 +1291,20 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+                */
+               if (down_write_trylock(&mm->mmap_sem)) {
+                       if (!khugepaged_test_exit(mm)) {
+-                              spinlock_t *ptl = pmd_lock(mm, pmd);
++                              spinlock_t *ptl;
++                              unsigned long end = addr + HPAGE_PMD_SIZE;
++
++                              mmu_notifier_invalidate_range_start(mm, addr,
++                                                                  end);
++                              ptl = pmd_lock(mm, pmd);
+                               /* assume page table is clear */
+                               _pmd = pmdp_collapse_flush(vma, addr, pmd);
+                               spin_unlock(ptl);
+                               atomic_long_dec(&mm->nr_ptes);
+                               tlb_remove_table_sync_one();
+                               pte_free(mm, pmd_pgtable(_pmd));
++                              mmu_notifier_invalidate_range_end(mm, addr,
++                                                                end);
+                       }
+                       up_write(&mm->mmap_sem);
+               }
+-- 
+2.35.1
+
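The rule the commit message states, that every codepath zapping page-table entries must be bracketed by MMU-notifier calls so secondary MMUs drop their cached translations first, boils down to the start/end pattern sketched below. The callback names and types are hypothetical stand-ins, not the kernel's mmu_notifier API.

```c
#include <stdio.h>

struct notifier {
	void (*invalidate_start)(unsigned long start, unsigned long end);
	void (*invalidate_end)(unsigned long start, unsigned long end);
};

static void secondary_start(unsigned long s, unsigned long e)
{
	printf("secondary MMU: drop mappings for [%#lx, %#lx)\n", s, e);
}

static void secondary_end(unsigned long s, unsigned long e)
{
	printf("secondary MMU: may fault mappings back in for [%#lx, %#lx)\n", s, e);
}

static struct notifier secondary = { secondary_start, secondary_end };

static void zap_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	secondary.invalidate_start(addr, end);	/* mmu_notifier_invalidate_range_start() */
	/* ... take the PMD lock, collapse/clear the entries, free the table ... */
	secondary.invalidate_end(addr, end);	/* mmu_notifier_invalidate_range_end() */
}

int main(void)
{
	zap_range(0x40000000UL, 2UL << 20);	/* e.g. one 2 MiB (HPAGE_PMD_SIZE) range */
	return 0;
}
```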
diff --git a/queue-4.14/net-usb-qmi_wwan-add-u-blox-0x1342-composition.patch b/queue-4.14/net-usb-qmi_wwan-add-u-blox-0x1342-composition.patch
new file mode 100644 (file)
index 0000000..416a700
--- /dev/null
@@ -0,0 +1,53 @@
+From d5d895ad9fbaca45757a2ef1e3df1d30682f1c71 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Nov 2022 13:54:55 +0100
+Subject: net: usb: qmi_wwan: add u-blox 0x1342 composition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Davide Tronchin <davide.tronchin.94@gmail.com>
+
+[ Upstream commit a487069e11b6527373f7c6f435d8998051d0b5d9 ]
+
+Add RmNet support for LARA-L6.
+
+LARA-L6 module can be configured (by AT interface) in three different
+USB modes:
+* Default mode (Vendor ID: 0x1546 Product ID: 0x1341) with 4 serial
+interfaces
+* RmNet mode (Vendor ID: 0x1546 Product ID: 0x1342) with 4 serial
+interfaces and 1 RmNet virtual network interface
+* CDC-ECM mode (Vendor ID: 0x1546 Product ID: 0x1343) with 4 serial
+interfaces and 1 CDC-ECM virtual network interface
+
+In RmNet mode LARA-L6 exposes the following interfaces:
+If 0: Diagnostic
+If 1: AT parser
+If 2: AT parser
+If 3: AT parser/alternative functions
+If 4: RMNET interface
+
+Signed-off-by: Davide Tronchin <davide.tronchin.94@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 74040db959d8..a2c3f5ee1780 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1366,6 +1366,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+       {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
+       {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
++      {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+-- 
+2.35.1
+
diff --git a/queue-4.14/rcutorture-automatically-create-initrd-directory.patch b/queue-4.14/rcutorture-automatically-create-initrd-directory.patch
new file mode 100644 (file)
index 0000000..44b706d
--- /dev/null
@@ -0,0 +1,118 @@
+From 1515d0b649ad10c0b41cdcbce3b19436633be0d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Aug 2018 14:16:46 -0700
+Subject: rcutorture: Automatically create initrd directory
+
+From: Connor Shu <Connor.Shu@ibm.com>
+
+[ Upstream commit 8f15c682ac5a778feb8e343f9057b89beb40d85b ]
+
+The rcutorture scripts currently expect the user to create the
+tools/testing/selftests/rcutorture/initrd directory.  Should the user
+fail to do this, the kernel build will fail with obscure and confusing
+error messages.  This commit therefore adds explicit checks for the
+tools/testing/selftests/rcutorture/initrd directory, and if not present,
+creates one on systems on which dracut is installed.  If this directory
+could not be created, a less obscure error message is emitted and the
+test is aborted.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Connor Shu <Connor.Shu@ibm.com>
+[ paulmck: Adapt the script to fit into the rcutorture framework and
+  severely abbreviate the initrd/init script. ]
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/rcutorture/bin/kvm.sh |  8 +++
+ .../selftests/rcutorture/bin/mkinitrd.sh      | 60 +++++++++++++++++++
+ 2 files changed, 68 insertions(+)
+ create mode 100755 tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+
+diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
+index b55895fb10ed..2299347b8e37 100755
+--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
++++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
+@@ -182,6 +182,14 @@ do
+       shift
+ done
++if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh
++then
++      :
++else
++      echo No initrd and unable to create one, aborting test >&2
++      exit 1
++fi
++
+ CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
+ if test -z "$configs"
+diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+new file mode 100755
+index 000000000000..ae773760f396
+--- /dev/null
++++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+@@ -0,0 +1,60 @@
++#!/bin/bash
++#
++# Create an initrd directory if one does not already exist.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++# Copyright (C) IBM Corporation, 2013
++#
++# Author: Connor Shu <Connor.Shu@ibm.com>
++
++D=tools/testing/selftests/rcutorture
++
++# Prerequisite checks
++[ -z "$D" ] && echo >&2 "No argument supplied" && exit 1
++if [ ! -d "$D" ]; then
++    echo >&2 "$D does not exist: Malformed kernel source tree?"
++    exit 1
++fi
++if [ -d "$D/initrd" ]; then
++    echo "$D/initrd already exists, no need to create it"
++    exit 0
++fi
++
++T=${TMPDIR-/tmp}/mkinitrd.sh.$$
++trap 'rm -rf $T' 0 2
++mkdir $T
++
++cat > $T/init << '__EOF___'
++#!/bin/sh
++while :
++do
++      sleep 1000000
++done
++__EOF___
++
++# Try using dracut to create initrd
++command -v dracut >/dev/null 2>&1 || { echo >&2 "Dracut not installed"; exit 1; }
++echo Creating $D/initrd using dracut.
++
++# Filesystem creation
++dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img
++cd $D
++mkdir initrd
++cd initrd
++zcat $T/initramfs.img | cpio -id
++cp $T/init init
++echo Done creating $D/initrd using dracut
++exit 0
+-- 
+2.35.1
+
diff --git a/queue-4.14/regulator-twl6030-fix-get-status-of-twl6032-regulato.patch b/queue-4.14/regulator-twl6030-fix-get-status-of-twl6032-regulato.patch
new file mode 100644 (file)
index 0000000..d60821f
--- /dev/null
@@ -0,0 +1,69 @@
+From 3051a01bb9c0b58315e4a58bc245ab132d860c71 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 20 Nov 2022 23:12:08 +0100
+Subject: regulator: twl6030: fix get status of twl6032 regulators
+
+From: Andreas Kemnade <andreas@kemnade.info>
+
+[ Upstream commit 31a6297b89aabc81b274c093a308a7f5b55081a7 ]
+
+Status is reported as always off in the 6032 case. Status
+reporting now matches the logic in the setters. One of
+the differences from the 6030 is that there are no groups,
+therefore the state needs to be read out in the lower bits.
+
+Signed-off-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://lore.kernel.org/r/20221120221208.3093727-3-andreas@kemnade.info
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/twl6030-regulator.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
+index 219cbd910dbf..485d25f683d8 100644
+--- a/drivers/regulator/twl6030-regulator.c
++++ b/drivers/regulator/twl6030-regulator.c
+@@ -71,6 +71,7 @@ struct twlreg_info {
+ #define TWL6030_CFG_STATE_SLEEP       0x03
+ #define TWL6030_CFG_STATE_GRP_SHIFT   5
+ #define TWL6030_CFG_STATE_APP_SHIFT   2
++#define TWL6030_CFG_STATE_MASK                0x03
+ #define TWL6030_CFG_STATE_APP_MASK    (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+ #define TWL6030_CFG_STATE_APP(v)      (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+                                               TWL6030_CFG_STATE_APP_SHIFT)
+@@ -131,13 +132,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+               if (grp < 0)
+                       return grp;
+               grp &= P1_GRP_6030;
++              val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
++              val = TWL6030_CFG_STATE_APP(val);
+       } else {
++              val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
++              val &= TWL6030_CFG_STATE_MASK;
+               grp = 1;
+       }
+-      val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+-      val = TWL6030_CFG_STATE_APP(val);
+-
+       return grp && (val == TWL6030_CFG_STATE_ON);
+ }
+@@ -190,7 +192,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
+       val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+-      switch (TWL6030_CFG_STATE_APP(val)) {
++      if (info->features & TWL6032_SUBCLASS)
++              val &= TWL6030_CFG_STATE_MASK;
++      else
++              val = TWL6030_CFG_STATE_APP(val);
++
++      switch (val) {
+       case TWL6030_CFG_STATE_ON:
+               return REGULATOR_STATUS_NORMAL;
+-- 
+2.35.1
+
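The difference between the two decodings is easiest to see as raw bit arithmetic. The sketch below mirrors the mask/shift defines visible in the patch hunk; the ON state value (assumed 0x01 here) and the sample register value are illustrative.

```c
#include <stdio.h>

#define CFG_STATE_ON		0x01	/* assumed value, for illustration */
#define CFG_STATE_MASK		0x03
#define CFG_STATE_APP_SHIFT	2
#define CFG_STATE_APP_MASK	(0x03 << CFG_STATE_APP_SHIFT)
#define CFG_STATE_APP(v)	(((v) & CFG_STATE_APP_MASK) >> CFG_STATE_APP_SHIFT)

int main(void)
{
	/* Sample VREG_STATE read-back: 6032-style "on" kept in the low bits. */
	unsigned int vreg_state = CFG_STATE_ON;

	printf("decoded as 6030 (APP field): %u\n", CFG_STATE_APP(vreg_state));		/* 0: off */
	printf("decoded as 6032 (low bits) : %u\n", vreg_state & CFG_STATE_MASK);	/* 1: on  */

	/* The old code always used the APP decode, so a TWL6032 regulator that
	 * is actually on was reported as always off, as the commit describes. */
	return 0;
}
```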
diff --git a/queue-4.14/series b/queue-4.14/series
new file mode 100644 (file)
index 0000000..8d8df0f
--- /dev/null
@@ -0,0 +1,15 @@
+arm-dts-rockchip-fix-node-name-for-hym8563-rtc.patch
+arm-dts-rockchip-fix-ir-receiver-node-names.patch
+arm-9251-1-perf-fix-stacktraces-for-tracepoint-event.patch
+arm-9266-1-mm-fix-no-mmu-zero_page-implementation.patch
+arm-dts-rockchip-disable-arm_global_timer-on-rk3066-.patch
+alsa-seq-fix-function-prototype-mismatch-in-snd_seq_.patch
+asoc-soc-pcm-add-null-check-in-be-reparenting.patch
+regulator-twl6030-fix-get-status-of-twl6032-regulato.patch
+net-usb-qmi_wwan-add-u-blox-0x1342-composition.patch
+mm-khugepaged-fix-gup-fast-interaction-by-sending-ip.patch
+mm-khugepaged-invoke-mmu-notifiers-in-shmem-file-col.patch
+xen-netback-ensure-protocol-headers-don-t-fall-in-th.patch
+xen-netback-do-some-code-cleanup.patch
+xen-netback-don-t-call-kfree_skb-with-interrupts-dis.patch
+rcutorture-automatically-create-initrd-directory.patch
diff --git a/queue-4.14/xen-netback-do-some-code-cleanup.patch b/queue-4.14/xen-netback-do-some-code-cleanup.patch
new file mode 100644 (file)
index 0000000..d6d2d1d
--- /dev/null
@@ -0,0 +1,147 @@
+From 965adc1b298c94c0e4ce18a29fe12dc281a73785 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jun 2022 06:37:26 +0200
+Subject: xen/netback: do some code cleanup
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit 5834e72eda0b7e5767eb107259d98eef19ebd11f ]
+
+Remove some unused macros and functions, make local functions static.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Acked-by: Wei Liu <wei.liu@kernel.org>
+Link: https://lore.kernel.org/r/20220608043726.9380-1-jgross@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 74e7e1efdad4 ("xen/netback: don't call kfree_skb() with interrupts disabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/xen-netback/common.h    | 12 ------------
+ drivers/net/xen-netback/interface.c | 16 +---------------
+ drivers/net/xen-netback/netback.c   |  4 +++-
+ drivers/net/xen-netback/rx.c        |  2 +-
+ 4 files changed, 5 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index bfa3c6aaebe6..4ef648f79993 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -48,7 +48,6 @@
+ #include <linux/debugfs.h>
+ typedef unsigned int pending_ring_idx_t;
+-#define INVALID_PENDING_RING_IDX (~0U)
+ struct pending_tx_info {
+       struct xen_netif_tx_request req; /* tx request */
+@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
+ /* Discriminate from any valid pending_idx value. */
+ #define INVALID_PENDING_IDX 0xFFFF
+-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
+-
+ #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
+ /* The maximum number of frags is derived from the size of a grant (same
+@@ -345,11 +342,6 @@ void xenvif_free(struct xenvif *vif);
+ int xenvif_xenbus_init(void);
+ void xenvif_xenbus_fini(void);
+-int xenvif_schedulable(struct xenvif *vif);
+-
+-int xenvif_queue_stopped(struct xenvif_queue *queue);
+-void xenvif_wake_queue(struct xenvif_queue *queue);
+-
+ /* (Un)Map communication rings. */
+ void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+@@ -372,7 +364,6 @@ int xenvif_dealloc_kthread(void *data);
+ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+ bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
+-void xenvif_rx_action(struct xenvif_queue *queue);
+ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+ void xenvif_carrier_on(struct xenvif *vif);
+@@ -380,9 +371,6 @@ void xenvif_carrier_on(struct xenvif *vif);
+ /* Callback from stack when TX packet can be released */
+ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+-/* Unmap a pending page and release it back to the guest */
+-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+-
+ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
+ {
+       return MAX_PENDING_REQS -
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 8ec25a5f1ee9..c3f64ca0bb63 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+       wake_up(&queue->dealloc_wq);
+ }
+-int xenvif_schedulable(struct xenvif *vif)
++static int xenvif_schedulable(struct xenvif *vif)
+ {
+       return netif_running(vif->dev) &&
+               test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+@@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+-int xenvif_queue_stopped(struct xenvif_queue *queue)
+-{
+-      struct net_device *dev = queue->vif->dev;
+-      unsigned int id = queue->id;
+-      return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+-}
+-
+-void xenvif_wake_queue(struct xenvif_queue *queue)
+-{
+-      struct net_device *dev = queue->vif->dev;
+-      unsigned int id = queue->id;
+-      netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+-}
+-
+ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+                              void *accel_priv,
+                              select_queue_fallback_t fallback)
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 6ad36f1dcb44..665c96f4d7e4 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -105,6 +105,8 @@ static void make_tx_response(struct xenvif_queue *queue,
+                            s8       st);
+ static void push_tx_responses(struct xenvif_queue *queue);
++static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
++
+ static inline int tx_work_todo(struct xenvif_queue *queue);
+ static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
+@@ -1426,7 +1428,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
+               notify_remote_via_irq(queue->tx_irq);
+ }
+-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
++static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+ {
+       int ret;
+       struct gnttab_unmap_grant_ref tx_unmap_op;
+diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
+index 2612810eadaf..6964f8b1a36b 100644
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -473,7 +473,7 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
+ #define RX_BATCH_SIZE 64
+-void xenvif_rx_action(struct xenvif_queue *queue)
++static void xenvif_rx_action(struct xenvif_queue *queue)
+ {
+       struct sk_buff_head completed_skbs;
+       unsigned int work_done = 0;
+-- 
+2.35.1
+
diff --git a/queue-4.14/xen-netback-don-t-call-kfree_skb-with-interrupts-dis.patch b/queue-4.14/xen-netback-don-t-call-kfree_skb-with-interrupts-dis.patch
new file mode 100644 (file)
index 0000000..7c5bc67
--- /dev/null
@@ -0,0 +1,105 @@
+From e69ec9a90510c0e8002dee90fa3ef7f7a10a1712 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Dec 2022 08:54:24 +0100
+Subject: xen/netback: don't call kfree_skb() with interrupts disabled
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit 74e7e1efdad45580cc3839f2a155174cf158f9b5 ]
+
+It is not allowed to call kfree_skb() from hardware interrupt
+context or with interrupts being disabled. So remove kfree_skb()
+from the spin_lock_irqsave() section and use the already existing
+"drop" label in xenvif_start_xmit() for dropping the SKB. At the
+same time replace the dev_kfree_skb() call there with a call of
+dev_kfree_skb_any(), as xenvif_start_xmit() can be called with
+disabled interrupts.
+
+This is XSA-424 / CVE-2022-42328 / CVE-2022-42329.
+
+Fixes: be81992f9086 ("xen/netback: don't queue unlimited number of packages")
+Reported-by: Yang Yingliang <yangyingliang@huawei.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/xen-netback/common.h    | 2 +-
+ drivers/net/xen-netback/interface.c | 6 ++++--
+ drivers/net/xen-netback/rx.c        | 8 +++++---
+ 3 files changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 4ef648f79993..e5f254500c1c 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -364,7 +364,7 @@ int xenvif_dealloc_kthread(void *data);
+ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+ bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
+-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
++bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+ void xenvif_carrier_on(struct xenvif *vif);
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index c3f64ca0bb63..c8e551932666 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               skb_clear_hash(skb);
+-      xenvif_rx_queue_tail(queue, skb);
++      if (!xenvif_rx_queue_tail(queue, skb))
++              goto drop;
++
+       xenvif_kick_thread(queue);
+       return NETDEV_TX_OK;
+  drop:
+       vif->dev->stats.tx_dropped++;
+-      dev_kfree_skb(skb);
++      dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
+index 6964f8b1a36b..5067fa0c751f 100644
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+       return false;
+ }
+-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
++bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+ {
+       unsigned long flags;
++      bool ret = true;
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+               struct net_device *dev = queue->vif->dev;
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+-              kfree_skb(skb);
+-              queue->vif->dev->stats.rx_dropped++;
++              ret = false;
+       } else {
+               if (skb_queue_empty(&queue->rx_queue))
+                       xenvif_update_needed_slots(queue, skb);
+@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+       }
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
++
++      return ret;
+ }
+ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+-- 
+2.35.1
+
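The refactoring pattern above, reporting the drop from inside the interrupt-disabled section and freeing the skb only after the lock is released, can be sketched in plain user-space C. The names and the pthread mutex standing in for spin_lock_irqsave() are illustrative; this is not the xen-netback code.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { char payload[64]; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queue_full = true;		/* force the drop path for the demo */

/* Old pattern: free the packet here, while the lock ("irqs off") is held.
 * New pattern: just report "not queued" and let the caller clean up. */
static bool queue_tail(struct pkt *p)
{
	bool queued = true;

	pthread_mutex_lock(&queue_lock);
	if (queue_full) {
		queued = false;		/* was: kfree_skb(skb) under the lock */
	} else {
		/* ... append p to the queue ... */
	}
	pthread_mutex_unlock(&queue_lock);

	return queued;
}

static void start_xmit(struct pkt *p)
{
	if (!queue_tail(p)) {
		/* drop outside the locked section (dev_kfree_skb_any() in the driver) */
		free(p);
		puts("packet dropped safely outside the lock");
		return;
	}
	puts("packet queued");
}

int main(void)
{
	start_xmit(calloc(1, sizeof(struct pkt)));
	return 0;
}
```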
diff --git a/queue-4.14/xen-netback-ensure-protocol-headers-don-t-fall-in-th.patch b/queue-4.14/xen-netback-ensure-protocol-headers-don-t-fall-in-th.patch
new file mode 100644 (file)
index 0000000..ae8fa42
--- /dev/null
@@ -0,0 +1,390 @@
+From cbccac1f0b4dc92a9af97415ba7b3b05ebbad1ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Nov 2022 09:16:59 +0000
+Subject: xen/netback: Ensure protocol headers don't fall in the non-linear
+ area
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+[ Upstream commit ad7f402ae4f466647c3a669b8a6f3e5d4271c84a ]
+
+In some cases, the frontend may send a packet where the protocol headers
+are spread across multiple slots. This would result in netback creating
+an skb where the protocol headers spill over into the non-linear area.
+Some drivers and NICs don't handle this properly resulting in an
+interface reset or worse.
+
+This issue was introduced by the removal of an unconditional skb pull in
+the tx path to improve performance.  Fix this without reintroducing the
+pull by setting up grant copy ops for as many slots as needed to reach
+the XEN_NETBACK_TX_COPY_LEN size. Adjust the rest of the code to handle
+multiple copy operations per skb.
+
+This is XSA-423 / CVE-2022-3643.
+
+Fixes: 7e5d7753956b ("xen-netback: remove unconditional __pskb_pull_tail() in guest Tx path")
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/xen-netback/netback.c | 223 ++++++++++++++++--------------
+ 1 file changed, 123 insertions(+), 100 deletions(-)
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index e1d6dbb4b770..6ad36f1dcb44 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -323,10 +323,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
+ struct xenvif_tx_cb {
+-      u16 pending_idx;
++      u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
++      u8 copy_count;
+ };
+ #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
++#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
++#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
+ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
+                                          u16 pending_idx,
+@@ -361,31 +364,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+       return skb;
+ }
+-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
+-                                                      struct sk_buff *skb,
+-                                                      struct xen_netif_tx_request *txp,
+-                                                      struct gnttab_map_grant_ref *gop,
+-                                                      unsigned int frag_overflow,
+-                                                      struct sk_buff *nskb)
++static void xenvif_get_requests(struct xenvif_queue *queue,
++                              struct sk_buff *skb,
++                              struct xen_netif_tx_request *first,
++                              struct xen_netif_tx_request *txfrags,
++                              unsigned *copy_ops,
++                              unsigned *map_ops,
++                              unsigned int frag_overflow,
++                              struct sk_buff *nskb,
++                              unsigned int extra_count,
++                              unsigned int data_len)
+ {
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       skb_frag_t *frags = shinfo->frags;
+-      u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+-      int start;
++      u16 pending_idx;
+       pending_ring_idx_t index;
+       unsigned int nr_slots;
++      struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
++      struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
++      struct xen_netif_tx_request *txp = first;
++
++      nr_slots = shinfo->nr_frags + 1;
++
++      copy_count(skb) = 0;
++
++      /* Create copy ops for exactly data_len bytes into the skb head. */
++      __skb_put(skb, data_len);
++      while (data_len > 0) {
++              int amount = data_len > txp->size ? txp->size : data_len;
++
++              cop->source.u.ref = txp->gref;
++              cop->source.domid = queue->vif->domid;
++              cop->source.offset = txp->offset;
++
++              cop->dest.domid = DOMID_SELF;
++              cop->dest.offset = (offset_in_page(skb->data +
++                                                 skb_headlen(skb) -
++                                                 data_len)) & ~XEN_PAGE_MASK;
++              cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
++                                             - data_len);
++
++              cop->len = amount;
++              cop->flags = GNTCOPY_source_gref;
+-      nr_slots = shinfo->nr_frags;
++              index = pending_index(queue->pending_cons);
++              pending_idx = queue->pending_ring[index];
++              callback_param(queue, pending_idx).ctx = NULL;
++              copy_pending_idx(skb, copy_count(skb)) = pending_idx;
++              copy_count(skb)++;
++
++              cop++;
++              data_len -= amount;
+-      /* Skip first skb fragment if it is on same page as header fragment. */
+-      start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
++              if (amount == txp->size) {
++                      /* The copy op covered the full tx_request */
++
++                      memcpy(&queue->pending_tx_info[pending_idx].req,
++                             txp, sizeof(*txp));
++                      queue->pending_tx_info[pending_idx].extra_count =
++                              (txp == first) ? extra_count : 0;
++
++                      if (txp == first)
++                              txp = txfrags;
++                      else
++                              txp++;
++                      queue->pending_cons++;
++                      nr_slots--;
++              } else {
++                      /* The copy op partially covered the tx_request.
++                       * The remainder will be mapped.
++                       */
++                      txp->offset += amount;
++                      txp->size -= amount;
++              }
++      }
+-      for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+-           shinfo->nr_frags++, txp++, gop++) {
++      for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
++           shinfo->nr_frags++, gop++) {
+               index = pending_index(queue->pending_cons++);
+               pending_idx = queue->pending_ring[index];
+-              xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
++              xenvif_tx_create_map_op(queue, pending_idx, txp,
++                                      txp == first ? extra_count : 0, gop);
+               frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++
++              if (txp == first)
++                      txp = txfrags;
++              else
++                      txp++;
+       }
+
+       if (frag_overflow) {
+@@ -406,7 +471,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
+               skb_shinfo(skb)->frag_list = nskb;
+       }
+
+-      return gop;
++      (*copy_ops) = cop - queue->tx_copy_ops;
++      (*map_ops) = gop - queue->tx_map_ops;
+ }
+
+ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
+@@ -442,7 +508,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+                              struct gnttab_copy **gopp_copy)
+ {
+       struct gnttab_map_grant_ref *gop_map = *gopp_map;
+-      u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
++      u16 pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
+@@ -453,24 +519,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+       struct skb_shared_info *first_shinfo = NULL;
+       int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+-                              frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
++                              frag_get_pending_idx(&shinfo->frags[0]) ==
++                                  copy_pending_idx(skb, copy_count(skb) - 1);
+       int i, err;
+
+-      /* Check status of header. */
+-      err = (*gopp_copy)->status;
+-      if (unlikely(err)) {
+-              if (net_ratelimit())
+-                      netdev_dbg(queue->vif->dev,
+-                                 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+-                                 (*gopp_copy)->status,
+-                                 pending_idx,
+-                                 (*gopp_copy)->source.u.ref);
+-              /* The first frag might still have this slot mapped */
+-              if (!sharedslot)
+-                      xenvif_idx_release(queue, pending_idx,
+-                                         XEN_NETIF_RSP_ERROR);
++      for (i = 0; i < copy_count(skb); i++) {
++              int newerr;
++
++              /* Check status of header. */
++              pending_idx = copy_pending_idx(skb, i);
++
++              newerr = (*gopp_copy)->status;
++              if (likely(!newerr)) {
++                      /* The first frag might still have this slot mapped */
++                      if (i < copy_count(skb) - 1 || !sharedslot)
++                              xenvif_idx_release(queue, pending_idx,
++                                                 XEN_NETIF_RSP_OKAY);
++              } else {
++                      err = newerr;
++                      if (net_ratelimit())
++                              netdev_dbg(queue->vif->dev,
++                                         "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
++                                         (*gopp_copy)->status,
++                                         pending_idx,
++                                         (*gopp_copy)->source.u.ref);
++                      /* The first frag might still have this slot mapped */
++                      if (i < copy_count(skb) - 1 || !sharedslot)
++                              xenvif_idx_release(queue, pending_idx,
++                                                 XEN_NETIF_RSP_ERROR);
++              }
++              (*gopp_copy)++;
+       }
+-      (*gopp_copy)++;
+
+ check_frags:
+       for (i = 0; i < nr_frags; i++, gop_map++) {
+@@ -517,14 +596,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+               if (err)
+                       continue;
+
+-              /* First error: if the header haven't shared a slot with the
+-               * first frag, release it as well.
+-               */
+-              if (!sharedslot)
+-                      xenvif_idx_release(queue,
+-                                         XENVIF_TX_CB(skb)->pending_idx,
+-                                         XEN_NETIF_RSP_OKAY);
+-
+               /* Invalidate preceding fragments of this skb. */
+               for (j = 0; j < i; j++) {
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+@@ -796,7 +867,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                                    unsigned *copy_ops,
+                                    unsigned *map_ops)
+ {
+-      struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
+       int ret;
+       unsigned int frag_overflow;
+@@ -878,8 +948,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                       continue;
+               }
+
++              data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
++                      XEN_NETBACK_TX_COPY_LEN : txreq.size;
++
+               ret = xenvif_count_requests(queue, &txreq, extra_count,
+                                           txfrags, work_to_do);
++
+               if (unlikely(ret < 0))
+                       break;
+
+@@ -905,9 +979,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+               index = pending_index(queue->pending_cons);
+               pending_idx = queue->pending_ring[index];
+
+-              data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
+-                          ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
+-                      XEN_NETBACK_TX_COPY_LEN : txreq.size;
++              if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
++                      data_len = txreq.size;
+
+               skb = xenvif_alloc_skb(data_len);
+               if (unlikely(skb == NULL)) {
+@@ -918,8 +991,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+               }
+
+               skb_shinfo(skb)->nr_frags = ret;
+-              if (data_len < txreq.size)
+-                      skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+@@ -981,54 +1052,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                                            type);
+               }
+
+-              XENVIF_TX_CB(skb)->pending_idx = pending_idx;
+-
+-              __skb_put(skb, data_len);
+-              queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+-              queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
+-              queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+-
+-              queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
+-                      virt_to_gfn(skb->data);
+-              queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+-              queue->tx_copy_ops[*copy_ops].dest.offset =
+-                      offset_in_page(skb->data) & ~XEN_PAGE_MASK;
+-
+-              queue->tx_copy_ops[*copy_ops].len = data_len;
+-              queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+-
+-              (*copy_ops)++;
+-
+-              if (data_len < txreq.size) {
+-                      frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+-                                           pending_idx);
+-                      xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+-                                              extra_count, gop);
+-                      gop++;
+-              } else {
+-                      frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+-                                           INVALID_PENDING_IDX);
+-                      memcpy(&queue->pending_tx_info[pending_idx].req,
+-                             &txreq, sizeof(txreq));
+-                      queue->pending_tx_info[pending_idx].extra_count =
+-                              extra_count;
+-              }
+-
+-              queue->pending_cons++;
+-
+-              gop = xenvif_get_requests(queue, skb, txfrags, gop,
+-                                        frag_overflow, nskb);
++              xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
++                                  map_ops, frag_overflow, nskb, extra_count,
++                                  data_len);
+
+               __skb_queue_tail(&queue->tx_queue, skb);
+
+               queue->tx.req_cons = idx;
+
+-              if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
++              if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
+                   (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
+                       break;
+       }
+
+-      (*map_ops) = gop - queue->tx_map_ops;
+       return;
+ }
+
+@@ -1107,9 +1143,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+       while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
+               struct xen_netif_tx_request *txp;
+               u16 pending_idx;
+-              unsigned data_len;
+
+-              pending_idx = XENVIF_TX_CB(skb)->pending_idx;
++              pending_idx = copy_pending_idx(skb, 0);
+               txp = &queue->pending_tx_info[pending_idx].req;
+
+               /* Check the remap error code. */
+@@ -1128,18 +1163,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+                       continue;
+               }
+
+-              data_len = skb->len;
+-              callback_param(queue, pending_idx).ctx = NULL;
+-              if (data_len < txp->size) {
+-                      /* Append the packet payload as a fragment. */
+-                      txp->offset += data_len;
+-                      txp->size -= data_len;
+-              } else {
+-                      /* Schedule a response immediately. */
+-                      xenvif_idx_release(queue, pending_idx,
+-                                         XEN_NETIF_RSP_OKAY);
+-              }
+-
+               if (txp->flags & XEN_NETTXF_csum_blank)
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+               else if (txp->flags & XEN_NETTXF_data_validated)
+@@ -1316,7 +1339,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
+ /* Called after netfront has transmitted */
+ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ {
+-      unsigned nr_mops, nr_cops = 0;
++      unsigned nr_mops = 0, nr_cops = 0;
+       int work_done, ret;
+
+       if (unlikely(!tx_work_todo(queue)))
+-- 
+2.35.1
+