--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Ilya Lesokhin <ilyal@mellanox.com>
+Date: Mon, 30 Apr 2018 10:16:11 +0300
+Subject: net: Rename and export copy_skb_header
+
+From: Ilya Lesokhin <ilyal@mellanox.com>
+
+commit 08303c189581c985e60f588ad92a041e46b6e307 upstream.
+
+[ jgross@suse.com: added as needed by XSA-403 mitigation ]
+
+copy_skb_header is renamed to skb_copy_header and
+exported. Exposing this function gives more flexibility
+in copying SKBs: skb_copy and skb_copy_expand do not
+give enough control over which parts are copied.
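+
+A minimal sketch of the kind of caller this enables (illustrative
+only, not part of this patch; the XSA-403 netfront mitigation later
+in this series uses this exact pattern in its bounce_skb() helper):
+
+	/* Copy an skb into a freshly allocated linear buffer of a
+	 * caller-chosen size, then duplicate the header fields with the
+	 * newly exported helper. Assumes size >= skb_headroom(skb) +
+	 * skb->len; the function name is made up for illustration.
+	 */
+	static struct sk_buff *copy_skb_sized(const struct sk_buff *skb,
+					      unsigned int size, gfp_t gfp)
+	{
+		unsigned int headerlen = skb_headroom(skb);
+		struct sk_buff *n = alloc_skb(size, gfp);
+
+		if (!n)
+			return NULL;
+
+		skb_reserve(n, headerlen);	/* keep the original headroom */
+		skb_put(n, skb->len);		/* reserve room for the payload */
+
+		if (skb_copy_bits(skb, -headerlen, n->head,
+				  headerlen + skb->len)) {
+			kfree_skb(n);
+			return NULL;
+		}
+
+		skb_copy_header(n, skb);
+		return n;
+	}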
+
+Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
+Signed-off-by: Boris Pismenny <borisp@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 1 +
+ net/core/skbuff.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -975,6 +975,7 @@ static inline struct sk_buff *alloc_skb_
+ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
++void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
+ gfp_t gfp_mask, bool fclone);
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1071,7 +1071,7 @@ static void skb_headers_offset_update(st
+ skb->inner_mac_header += off;
+ }
+
+-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
++void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
+ {
+ __copy_skb_header(new, old);
+
+@@ -1079,6 +1079,7 @@ static void copy_skb_header(struct sk_bu
+ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
+ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+ }
++EXPORT_SYMBOL(skb_copy_header);
+
+ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+ {
+@@ -1122,7 +1123,7 @@ struct sk_buff *skb_copy(const struct sk
+ if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
+ BUG();
+
+- copy_skb_header(n, skb);
++ skb_copy_header(n, skb);
+ return n;
+ }
+ EXPORT_SYMBOL(skb_copy);
+@@ -1185,7 +1186,7 @@ struct sk_buff *__pskb_copy_fclone(struc
+ skb_clone_fraglist(n);
+ }
+
+- copy_skb_header(n, skb);
++ skb_copy_header(n, skb);
+ out:
+ return n;
+ }
+@@ -1356,7 +1357,7 @@ struct sk_buff *skb_copy_expand(const st
+ skb->len + head_copy_len))
+ BUG();
+
+- copy_skb_header(n, skb);
++ skb_copy_header(n, skb);
+
+ skb_headers_offset_update(n, newheadroom - oldheadroom);
+
--- /dev/null
+From 8d17a33b076d24aa4861f336a125c888fb918605 Mon Sep 17 00:00:00 2001
+From: Carlo Lobrano <c.lobrano@gmail.com>
+Date: Fri, 3 Sep 2021 14:09:53 +0200
+Subject: net: usb: qmi_wwan: add Telit 0x1060 composition
+
+From: Carlo Lobrano <c.lobrano@gmail.com>
+
+commit 8d17a33b076d24aa4861f336a125c888fb918605 upstream.
+
+This patch adds support for the Telit LN920 0x1060 composition:
+
+0x1060: tty, adb, rmnet, tty, tty, tty, tty
+
+The rmnet (QMI) function sits at interface #2 of this composition,
+which is why the new entry below passes 2 to QMI_QUIRK_SET_DTR.
+
+Signed-off-by: Carlo Lobrano <c.lobrano@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Fabio Porcedda <fabio.porcedda@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -928,6 +928,7 @@ static const struct usb_device_id produc
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
--- /dev/null
+From 94f2a444f28a649926c410eb9a38afb13a83ebe0 Mon Sep 17 00:00:00 2001
+From: Daniele Palmas <dnlplm@gmail.com>
+Date: Fri, 10 Dec 2021 10:57:22 +0100
+Subject: net: usb: qmi_wwan: add Telit 0x1070 composition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+commit 94f2a444f28a649926c410eb9a38afb13a83ebe0 upstream.
+
+Add the following Telit FN990 composition:
+
+0x1070: tty, adb, rmnet, tty, tty, tty, tty
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Link: https://lore.kernel.org/r/20211210095722.22269-1-dnlplm@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Cc: Fabio Porcedda <fabio.porcedda@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -929,6 +929,7 @@ static const struct usb_device_id produc
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
--- /dev/null
+From b4e467c82f8c12af78b6f6fa5730cb7dea7af1b4 Mon Sep 17 00:00:00 2001
+From: Daniele Palmas <dnlplm@gmail.com>
+Date: Wed, 15 May 2019 17:29:43 +0200
+Subject: net: usb: qmi_wwan: add Telit 0x1260 and 0x1261 compositions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+commit b4e467c82f8c12af78b6f6fa5730cb7dea7af1b4 upstream.
+
+Added support for Telit LE910Cx 0x1260 and 0x1261 compositions.
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Fabio Porcedda <fabio.porcedda@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -932,6 +932,8 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
--- /dev/null
+From 5fd8477ed8ca77e64b93d44a6dae4aa70c191396 Mon Sep 17 00:00:00 2001
+From: Daniele Palmas <dnlplm@gmail.com>
+Date: Mon, 2 Nov 2020 12:01:08 +0100
+Subject: net: usb: qmi_wwan: add Telit LE910Cx 0x1230 composition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniele Palmas <dnlplm@gmail.com>
+
+commit 5fd8477ed8ca77e64b93d44a6dae4aa70c191396 upstream.
+
+Add support for Telit LE910Cx 0x1230 composition:
+
+0x1230: tty, adb, rmnet, audio, tty, tty, tty, tty
+
+Signed-off-by: Daniele Palmas <dnlplm@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Link: https://lore.kernel.org/r/20201102110108.17244-1-dnlplm@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Cc: Fabio Porcedda <fabio.porcedda@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -932,6 +932,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
--- /dev/null
+From 1986af16e8ed355822600c24b3d2f0be46b573df Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=B6rgen=20Storvist?= <jorgen.storvist@gmail.com>
+Date: Thu, 13 Dec 2018 17:00:35 +0100
+Subject: qmi_wwan: Added support for Telit LN940 series
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jörgen Storvist <jorgen.storvist@gmail.com>
+
+commit 1986af16e8ed355822600c24b3d2f0be46b573df upstream.
+
+Added support for the Telit LN940 series cellular modules' QMI
+interface. The QMI_QUIRK_SET_DTR quirk is required for the Qualcomm
+MDM9x40 chipset.
+
+Signed-off-by: Jörgen Storvist <jorgen.storvist@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -932,6 +932,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
net-dsa-bcm_sf2-force-pause-link-settings.patch
sit-use-min.patch
ipv6-sit-fix-ipip6_tunnel_get_prl-return-value.patch
+net-rename-and-export-copy_skb_header.patch
+xen-blkfront-fix-leaking-data-in-shared-pages.patch
+xen-netfront-fix-leaking-data-in-shared-pages.patch
+xen-netfront-force-data-bouncing-when-backend-is-untrusted.patch
+xen-blkfront-force-data-bouncing-when-backend-is-untrusted.patch
+xen-arm-fix-race-in-rb-tree-based-p2m-accounting.patch
+qmi_wwan-added-support-for-telit-ln940-series.patch
+net-usb-qmi_wwan-add-telit-0x1260-and-0x1261-compositions.patch
+net-usb-qmi_wwan-add-telit-le910cx-0x1230-composition.patch
+net-usb-qmi_wwan-add-telit-0x1060-composition.patch
+net-usb-qmi_wwan-add-telit-0x1070-composition.patch
--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+Date: Fri, 1 Jul 2022 09:57:42 +0200
+Subject: xen/arm: Fix race in RB-tree based P2M accounting
+
+From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+
+commit b75cd218274e01d026dc5240e86fdeb44bbed0c8 upstream.
+
+During the PV driver life cycle the mappings are added to
+the RB-tree by set_foreign_p2m_mapping(), which is called from
+gnttab_map_refs(), and are removed by clear_foreign_p2m_mapping(),
+which is called from gnttab_unmap_refs(). As both functions end
+up calling __set_phys_to_machine_multi(), which updates the RB-tree,
+this function can be called concurrently.
+
+There is already a "p2m_lock" to protect against concurrent accesses,
+but the problem is that the first read of "phys_to_mach.rb_node"
+in __set_phys_to_machine_multi() is not covered by it, so this might
+lead to an incorrect mapping update (a removal, in our case) in the
+RB-tree.
+
+In my environment the related issue happens rarely and only when
+a PV net backend is running: xen_add_phys_to_mach_entry() claims
+that it cannot add a new pfn <-> mfn mapping to the tree since it
+already exists, which results in a failure when mapping foreign pages.
+
+But there might be other bad consequences of the unprotected root
+reads, such as use-after-free.
+
+While at it, also fix the similar usage in __pfn_to_mfn(): initialize
+"struct rb_node *n" with the "p2m_lock" held in both functions to
+avoid possible bad consequences.
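+
+The corrected pattern, as a minimal sketch (a condensed version of
+the __pfn_to_mfn() walk below; the function name is illustrative):
+
+	static unsigned long p2m_lookup_mfn(unsigned long pfn)
+	{
+		unsigned long mfn = INVALID_P2M_ENTRY;
+		struct rb_node *n;
+		unsigned long irqflags;
+
+		read_lock_irqsave(&p2m_lock, irqflags);
+		n = phys_to_mach.rb_node;	/* root read under the lock */
+		while (n) {
+			struct xen_p2m_entry *entry =
+				rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+
+			if (pfn < entry->pfn)
+				n = n->rb_left;
+			else if (pfn >= entry->pfn + entry->nr_pages)
+				n = n->rb_right;
+			else {
+				mfn = entry->mfn + (pfn - entry->pfn);
+				break;
+			}
+		}
+		read_unlock_irqrestore(&p2m_lock, irqflags);
+		return mfn;
+	}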
+
+This is CVE-2022-33744 / XSA-406.
+
+Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/xen/p2m.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/xen/p2m.c
++++ b/arch/arm/xen/p2m.c
+@@ -61,11 +61,12 @@ out:
+
+ unsigned long __pfn_to_mfn(unsigned long pfn)
+ {
+- struct rb_node *n = phys_to_mach.rb_node;
++ struct rb_node *n;
+ struct xen_p2m_entry *entry;
+ unsigned long irqflags;
+
+ read_lock_irqsave(&p2m_lock, irqflags);
++ n = phys_to_mach.rb_node;
+ while (n) {
+ entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (entry->pfn <= pfn &&
+@@ -151,10 +152,11 @@ bool __set_phys_to_machine_multi(unsigne
+ int rc;
+ unsigned long irqflags;
+ struct xen_p2m_entry *p2m_entry;
+- struct rb_node *n = phys_to_mach.rb_node;
++ struct rb_node *n;
+
+ if (mfn == INVALID_P2M_ENTRY) {
+ write_lock_irqsave(&p2m_lock, irqflags);
++ n = phys_to_mach.rb_node;
+ while (n) {
+ p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+ if (p2m_entry->pfn <= pfn &&
--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Wed, 30 Mar 2022 09:03:48 +0200
+Subject: xen/blkfront: fix leaking data in shared pages
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 2f446ffe9d737e9a844b97887919c4fda18246e7 upstream.
+
+When allocating pages to be used for shared communication with the
+backend, always zero them; this avoids leaking unintended data present
+on the pages.
+
+This is CVE-2022-26365, part of XSA-403.
+
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkfront.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -297,7 +297,7 @@ static int fill_grant_buffer(struct blkf
+ goto out_of_memory;
+
+ if (info->feature_persistent) {
+- granted_page = alloc_page(GFP_NOIO);
++ granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+ goto out_of_memory;
+@@ -1729,7 +1729,7 @@ static int setup_blkring(struct xenbus_d
+ for (i = 0; i < info->nr_ring_pages; i++)
+ rinfo->ring_ref[i] = GRANT_INVALID_REF;
+
+- sring = alloc_pages_exact(ring_size, GFP_NOIO);
++ sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
+ if (!sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+ return -ENOMEM;
+@@ -2311,7 +2311,8 @@ static int blkfront_setup_indirect(struc
+
+ BUG_ON(!list_empty(&rinfo->indirect_pages));
+ for (i = 0; i < num; i++) {
+- struct page *indirect_page = alloc_page(GFP_NOIO);
++ struct page *indirect_page = alloc_page(GFP_NOIO |
++ __GFP_ZERO);
+ if (!indirect_page)
+ goto out_of_memory;
+ list_add(&indirect_page->lru, &rinfo->indirect_pages);
--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Thu, 7 Apr 2022 13:04:24 +0200
+Subject: xen/blkfront: force data bouncing when backend is untrusted
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 2400617da7eebf9167d71a46122828bc479d64c9 upstream.
+
+Split the current bounce buffering logic used with persistent grants
+into its own option, and allow enabling it independently of
+persistent grants. This allows reusing the same code paths to
+perform the bounce buffering required to avoid leaking contiguous
+data in shared pages that are not part of the request fragments.
+
+Reporting whether the backend is to be trusted can be done using a
+module parameter, or from the xenstore frontend path as set by the
+toolstack when adding the device.
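+
+For example, booting with "xen_blkfront.trusted=0" on the kernel
+command line should force bouncing regardless of what the toolstack
+writes to xenstore (the parameter name follows from the
+module_param_named() added below).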
+
+This is CVE-2022-33742, part of XSA-403.
+
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkfront.c | 45 ++++++++++++++++++++++++++++---------------
+ 1 file changed, 30 insertions(+), 15 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -144,6 +144,10 @@ static unsigned int xen_blkif_max_ring_o
+ module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
+ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+
++static bool __read_mostly xen_blkif_trusted = true;
++module_param_named(trusted, xen_blkif_trusted, bool, 0644);
++MODULE_PARM_DESC(trusted, "Is the backend trusted");
++
+ #define BLK_RING_SIZE(info) \
+ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
+
+@@ -206,6 +210,7 @@ struct blkfront_info
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
+ unsigned int feature_persistent:1;
++ unsigned int bounce:1;
+ /* Number of 4KB segments handled */
+ unsigned int max_indirect_segments;
+ int is_ready;
+@@ -296,7 +301,7 @@ static int fill_grant_buffer(struct blkf
+ if (!gnt_list_entry)
+ goto out_of_memory;
+
+- if (info->feature_persistent) {
++ if (info->bounce) {
+ granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+@@ -316,7 +321,7 @@ out_of_memory:
+ list_for_each_entry_safe(gnt_list_entry, n,
+ &rinfo->grants, node) {
+ list_del(&gnt_list_entry->node);
+- if (info->feature_persistent)
++ if (info->bounce)
+ __free_page(gnt_list_entry->page);
+ kfree(gnt_list_entry);
+ i--;
+@@ -362,7 +367,7 @@ static struct grant *get_grant(grant_ref
+ /* Assign a gref to this page */
+ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+ BUG_ON(gnt_list_entry->gref == -ENOSPC);
+- if (info->feature_persistent)
++ if (info->bounce)
+ grant_foreign_access(gnt_list_entry, info);
+ else {
+ /* Grant access to the GFN passed by the caller */
+@@ -386,7 +391,7 @@ static struct grant *get_indirect_grant(
+ /* Assign a gref to this page */
+ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+ BUG_ON(gnt_list_entry->gref == -ENOSPC);
+- if (!info->feature_persistent) {
++ if (!info->bounce) {
+ struct page *indirect_page;
+
+ /* Fetch a pre-allocated page to use for indirect grefs */
+@@ -701,7 +706,7 @@ static int blkif_queue_rw_req(struct req
+ .grant_idx = 0,
+ .segments = NULL,
+ .rinfo = rinfo,
+- .need_copy = rq_data_dir(req) && info->feature_persistent,
++ .need_copy = rq_data_dir(req) && info->bounce,
+ };
+
+ /*
+@@ -1015,11 +1020,12 @@ static void xlvbd_flush(struct blkfront_
+ {
+ blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+ info->feature_fua ? true : false);
+- pr_info("blkfront: %s: %s %s %s %s %s\n",
++ pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
+ info->gd->disk_name, flush_info(info),
+ "persistent grants:", info->feature_persistent ?
+ "enabled;" : "disabled;", "indirect descriptors:",
+- info->max_indirect_segments ? "enabled;" : "disabled;");
++ info->max_indirect_segments ? "enabled;" : "disabled;",
++ "bounce buffer:", info->bounce ? "enabled" : "disabled;");
+ }
+
+ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
+@@ -1254,7 +1260,7 @@ static void blkif_free_ring(struct blkfr
+ if (!list_empty(&rinfo->indirect_pages)) {
+ struct page *indirect_page, *n;
+
+- BUG_ON(info->feature_persistent);
++ BUG_ON(info->bounce);
+ list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
+ list_del(&indirect_page->lru);
+ __free_page(indirect_page);
+@@ -1271,7 +1277,7 @@ static void blkif_free_ring(struct blkfr
+ continue;
+
+ rinfo->persistent_gnts_c--;
+- if (info->feature_persistent)
++ if (info->bounce)
+ __free_page(persistent_gnt->page);
+ kfree(persistent_gnt);
+ }
+@@ -1291,7 +1297,7 @@ static void blkif_free_ring(struct blkfr
+ for (j = 0; j < segs; j++) {
+ persistent_gnt = rinfo->shadow[i].grants_used[j];
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+- if (info->feature_persistent)
++ if (info->bounce)
+ __free_page(persistent_gnt->page);
+ kfree(persistent_gnt);
+ }
+@@ -1481,7 +1487,7 @@ static int blkif_completion(unsigned lon
+ data.s = s;
+ num_sg = s->num_sg;
+
+- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
++ if (bret->operation == BLKIF_OP_READ && info->bounce) {
+ for_each_sg(s->sg, sg, num_sg, i) {
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+
+@@ -1540,7 +1546,7 @@ static int blkif_completion(unsigned lon
+ * Add the used indirect page back to the list of
+ * available pages for indirect grefs.
+ */
+- if (!info->feature_persistent) {
++ if (!info->bounce) {
+ indirect_page = s->indirect_grants[i]->page;
+ list_add(&indirect_page->lru, &rinfo->indirect_pages);
+ }
+@@ -1822,6 +1828,13 @@ static int talk_to_blkback(struct xenbus
+ int err;
+ unsigned int i, max_page_order = 0;
+ unsigned int ring_page_order = 0;
++ unsigned int trusted;
++
++ /* Check if backend is trusted. */
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted);
++ if (err < 0)
++ trusted = 1;
++ info->bounce = !xen_blkif_trusted || !trusted;
+
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "max-ring-page-order", "%u", &max_page_order);
+@@ -2301,10 +2314,10 @@ static int blkfront_setup_indirect(struc
+ if (err)
+ goto out_of_memory;
+
+- if (!info->feature_persistent && info->max_indirect_segments) {
++ if (!info->bounce && info->max_indirect_segments) {
+ /*
+- * We are using indirect descriptors but not persistent
+- * grants, we need to allocate a set of pages that can be
++ * We are using indirect descriptors but don't have a bounce
++ * buffer, we need to allocate a set of pages that can be
+ * used for mapping indirect grefs
+ */
+ int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
+@@ -2410,6 +2423,8 @@ static void blkfront_gather_backend_feat
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
++ if (info->feature_persistent)
++ info->bounce = true;
+
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u",
--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Wed, 6 Apr 2022 17:38:04 +0200
+Subject: xen/netfront: fix leaking data in shared pages
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 307c8de2b02344805ebead3440d8feed28f2f010 upstream.
+
+When allocating pages to be used for shared communication with the
+backend, always zero them; this avoids leaking unintended data present
+on the pages.
+
+This is CVE-2022-33740, part of XSA-403.
+
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -260,7 +260,7 @@ static struct sk_buff *xennet_alloc_one_
+ if (unlikely(!skb))
+ return NULL;
+
+- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
+ if (!page) {
+ kfree_skb(skb);
+ return NULL;
--- /dev/null
+From foo@baz Tue Jul 5 01:07:18 PM CEST 2022
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Thu, 7 Apr 2022 12:20:06 +0200
+Subject: xen/netfront: force data bouncing when backend is untrusted
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit 4491001c2e0fa69efbb748c96ec96b100a5cdb7e upstream.
+
+Bounce all data on the skbs to be transmitted into zeroed pages if the
+backend is untrusted. This avoids leaking data present in the pages
+shared with the backend but not part of the skb fragments. This
+requires introducing a new helper in order to allocate skbs with a
+size that is a multiple of XEN_PAGE_SIZE, so we don't leak contiguous
+data on the granted pages.
+
+Reporting whether the backend is to be trusted can be done using a
+module parameter, or from the xenstore frontend path as set by the
+toolstack when adding the device.
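+
+For example, booting with "xen_netfront.trusted=0" on the kernel
+command line should force bouncing regardless of what the toolstack
+writes to xenstore.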
+
+This is CVE-2022-33741, part of XSA-403.
+
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 53 +++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 51 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -62,6 +62,10 @@ module_param_named(max_queues, xennet_ma
+ MODULE_PARM_DESC(max_queues,
+ "Maximum number of queues per virtual interface");
+
++static bool __read_mostly xennet_trusted = true;
++module_param_named(trusted, xennet_trusted, bool, 0644);
++MODULE_PARM_DESC(trusted, "Is the backend trusted");
++
+ #define XENNET_TIMEOUT (5 * HZ)
+
+ static const struct ethtool_ops xennet_ethtool_ops;
+@@ -162,6 +166,9 @@ struct netfront_info {
+ /* Is device behaving sane? */
+ bool broken;
+
++ /* Should skbs be bounced into a zeroed buffer? */
++ bool bounce;
++
+ atomic_t rx_gso_checksum_fixup;
+ };
+
+@@ -591,6 +598,34 @@ static void xennet_mark_tx_pending(struc
+ queue->tx_link[i] = TX_PENDING;
+ }
+
++struct sk_buff *bounce_skb(const struct sk_buff *skb)
++{
++ unsigned int headerlen = skb_headroom(skb);
++ /* Align size to allocate full pages and avoid contiguous data leaks */
++ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
++ XEN_PAGE_SIZE);
++ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
++
++ if (!n)
++ return NULL;
++
++ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
++ WARN_ONCE(1, "misaligned skb allocated\n");
++ kfree_skb(n);
++ return NULL;
++ }
++
++ /* Set the data pointer */
++ skb_reserve(n, headerlen);
++ /* Set the tail pointer and length */
++ skb_put(n, skb->len);
++
++ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
++
++ skb_copy_header(n, skb);
++ return n;
++}
++
+ #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+
+ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+@@ -643,9 +678,13 @@ static int xennet_start_xmit(struct sk_b
+
+ /* The first req should be at least ETH_HLEN size or the packet will be
+ * dropped by netback.
++ *
++ * If the backend is not trusted bounce all data to zeroed pages to
++ * avoid exposing contiguous data on the granted page not belonging to
++ * the skb.
+ */
+- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+- nskb = skb_copy(skb, GFP_ATOMIC);
++ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
++ nskb = bounce_skb(skb);
+ if (!nskb)
+ goto drop;
+ dev_kfree_skb_any(skb);
+@@ -1962,9 +2001,16 @@ static int talk_to_netback(struct xenbus
+ unsigned int max_queues = 0;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = 1;
++ unsigned int trusted;
+
+ info->netdev->irq = 0;
+
++ /* Check if backend is trusted. */
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted);
++ if (err < 0)
++ trusted = 1;
++ info->bounce = !xennet_trusted || !trusted;
++
+ /* Check if backend supports multiple queues */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "multi-queue-max-queues", "%u", &max_queues);
+@@ -2129,6 +2175,9 @@ static int xennet_connect(struct net_dev
+ err = talk_to_netback(np->xbdev, np);
+ if (err)
+ return err;
++ if (np->bounce)
++ dev_info(&np->xbdev->dev,
++ "bouncing transmitted data to zeroed pages\n");
+
+ /* talk_to_netback() sets the correct number of queues */
+ num_queues = dev->real_num_tx_queues;