5.5-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Jan 2020 10:19:07 +0000 (11:19 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Jan 2020 10:19:07 +0000 (11:19 +0100)
added patches:
mlxsw-minimal-fix-an-error-handling-path-in-mlxsw_m_port_create.patch
mvneta-driver-disallow-xdp-program-on-hardware-buffer-management.patch
net-include-struct-nhmsg-size-in-nh-nlmsg-size.patch
net-socionext-fix-possible-user-after-free-in-netsec_process_rx.patch
net-socionext-fix-xdp_result-initialization-in-netsec_process_rx.patch
net_sched-ematch-reject-invalid-tcf_em_simple.patch
net_sched-fix-ops-bind_class-implementations.patch
net_sched-walk-through-all-child-classes-in-tc_bind_tclass.patch
rxrpc-fix-use-after-free-in-rxrpc_receive_data.patch
udp-segment-looped-gso-packets-correctly.patch

queue-5.5/mlxsw-minimal-fix-an-error-handling-path-in-mlxsw_m_port_create.patch [new file with mode: 0644]
queue-5.5/mvneta-driver-disallow-xdp-program-on-hardware-buffer-management.patch [new file with mode: 0644]
queue-5.5/net-include-struct-nhmsg-size-in-nh-nlmsg-size.patch [new file with mode: 0644]
queue-5.5/net-socionext-fix-possible-user-after-free-in-netsec_process_rx.patch [new file with mode: 0644]
queue-5.5/net-socionext-fix-xdp_result-initialization-in-netsec_process_rx.patch [new file with mode: 0644]
queue-5.5/net_sched-ematch-reject-invalid-tcf_em_simple.patch [new file with mode: 0644]
queue-5.5/net_sched-fix-ops-bind_class-implementations.patch [new file with mode: 0644]
queue-5.5/net_sched-walk-through-all-child-classes-in-tc_bind_tclass.patch [new file with mode: 0644]
queue-5.5/rxrpc-fix-use-after-free-in-rxrpc_receive_data.patch [new file with mode: 0644]
queue-5.5/series
queue-5.5/udp-segment-looped-gso-packets-correctly.patch [new file with mode: 0644]

diff --git a/queue-5.5/mlxsw-minimal-fix-an-error-handling-path-in-mlxsw_m_port_create.patch b/queue-5.5/mlxsw-minimal-fix-an-error-handling-path-in-mlxsw_m_port_create.patch
new file mode 100644 (file)
index 0000000..ec8f39f
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 25 Jan 2020 22:18:47 +0100
+Subject: mlxsw: minimal: Fix an error handling path in 'mlxsw_m_port_create()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 6dd4b4f3936e17fedea1308bc70e9716f68bf232 ]
+
+An 'alloc_etherdev()' call is not balanced by a corresponding
+'free_netdev()' call in one error handling path.
+
+Slightly reorder the error handling code to catch the missed case.
+
+Fixes: c100e47caa8e ("mlxsw: minimal: Add ethtool support")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Ido Schimmel <idosch@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/minimal.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+@@ -213,8 +213,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxs
+ err_register_netdev:
+       mlxsw_m->ports[local_port] = NULL;
+-      free_netdev(dev);
+ err_dev_addr_get:
++      free_netdev(dev);
+ err_alloc_etherdev:
+       mlxsw_core_port_fini(mlxsw_m->core, local_port);
+       return err;
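
The fix above is an instance of the usual goto-unwind convention for kernel
error paths: each label releases only what was successfully acquired before the
step whose failure jumps there, so a resource must be freed under the first
label that can be reached once it exists. A minimal userspace sketch of the
idiom (illustrative only, with made-up names; not the mlxsw driver code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical three-step setup mirroring the goto-unwind idiom. */
static int port_create(void)
{
	int err;

	char *core = malloc(16);	/* step 1 */
	if (!core)
		return -1;

	char *dev = malloc(32);		/* step 2: the alloc_etherdev() analogue */
	if (!dev) {
		err = -1;
		goto err_alloc_dev;
	}

	err = -1;			/* pretend step 3 (e.g. address setup) failed */
	if (err)
		goto err_dev_setup;

	return 0;

err_dev_setup:
	free(dev);			/* undo step 2: the free_netdev() analogue */
err_alloc_dev:
	free(core);			/* undo step 1 */
	return err;
}

int main(void)
{
	printf("port_create() = %d\n", port_create());
	return 0;
}

In the driver, moving free_netdev() from under err_register_netdev down to
err_dev_addr_get makes the path that fails after alloc_etherdev() release the
netdev as well.
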
diff --git a/queue-5.5/mvneta-driver-disallow-xdp-program-on-hardware-buffer-management.patch b/queue-5.5/mvneta-driver-disallow-xdp-program-on-hardware-buffer-management.patch
new file mode 100644 (file)
index 0000000..cb3bbe1
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Sven Auhagen <sven.auhagen@voleatech.de>
+Date: Sat, 25 Jan 2020 08:07:03 +0000
+Subject: mvneta driver disallow XDP program on hardware buffer management
+
+From: Sven Auhagen <sven.auhagen@voleatech.de>
+
+[ Upstream commit 79572c98c554dcdb080bca547c871a51716dcdf8 ]
+
+Recently, XDP support was added to the mvneta driver
+for software buffer management only.
+It is still possible to attach an XDP program if
+hardware buffer management is used, but the program
+does nothing at that point.
+
+This patch disallows attaching XDP programs to mvneta
+if hardware buffer management is used.
+
+I am sorry about that. It is my first submission and I am having
+some trouble with the format of my emails.
+
+v4 -> v5:
+- Remove extra tabs
+
+v3 -> v4:
+- Please ignore v3 I accidentally submitted
+  my other patch with git-send-mail and v4 is correct
+
+v2 -> v3:
+- My mailserver corrupted the patch
+  resubmission with git-send-email
+
+v1 -> v2:
+- Fixing the patches indentation
+
+Signed-off-by: Sven Auhagen <sven.auhagen@voleatech.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4226,6 +4226,12 @@ static int mvneta_xdp_setup(struct net_d
+               return -EOPNOTSUPP;
+       }
++      if (pp->bm_priv) {
++              NL_SET_ERR_MSG_MOD(extack,
++                                 "Hardware Buffer Management not supported on XDP");
++              return -EOPNOTSUPP;
++      }
++
+       need_update = !!pp->xdp_prog != !!prog;
+       if (running && need_update)
+               mvneta_stop(dev);
diff --git a/queue-5.5/net-include-struct-nhmsg-size-in-nh-nlmsg-size.patch b/queue-5.5/net-include-struct-nhmsg-size-in-nh-nlmsg-size.patch
new file mode 100644 (file)
index 0000000..454d954
--- /dev/null
@@ -0,0 +1,103 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Stephen Worley <sworley@cumulusnetworks.com>
+Date: Fri, 24 Jan 2020 16:53:27 -0500
+Subject: net: include struct nhmsg size in nh nlmsg size
+
+From: Stephen Worley <sworley@cumulusnetworks.com>
+
+[ Upstream commit f9e95555757915fc194288862d2978e370fe316b ]
+
+Include the size of struct nhmsg when calculating
+how much of a payload to allocate in a new netlink nexthop
+notification message.
+
+Without this, we will fail to fill the skbuff at certain nexthop
+group sizes.
+
+You can reproduce the failure with the following iproute2 commands:
+
+ip link add dummy1 type dummy
+ip link add dummy2 type dummy
+ip link add dummy3 type dummy
+ip link add dummy4 type dummy
+ip link add dummy5 type dummy
+ip link add dummy6 type dummy
+ip link add dummy7 type dummy
+ip link add dummy8 type dummy
+ip link add dummy9 type dummy
+ip link add dummy10 type dummy
+ip link add dummy11 type dummy
+ip link add dummy12 type dummy
+ip link add dummy13 type dummy
+ip link add dummy14 type dummy
+ip link add dummy15 type dummy
+ip link add dummy16 type dummy
+ip link add dummy17 type dummy
+ip link add dummy18 type dummy
+ip link add dummy19 type dummy
+
+ip ro add 1.1.1.1/32 dev dummy1
+ip ro add 1.1.1.2/32 dev dummy2
+ip ro add 1.1.1.3/32 dev dummy3
+ip ro add 1.1.1.4/32 dev dummy4
+ip ro add 1.1.1.5/32 dev dummy5
+ip ro add 1.1.1.6/32 dev dummy6
+ip ro add 1.1.1.7/32 dev dummy7
+ip ro add 1.1.1.8/32 dev dummy8
+ip ro add 1.1.1.9/32 dev dummy9
+ip ro add 1.1.1.10/32 dev dummy10
+ip ro add 1.1.1.11/32 dev dummy11
+ip ro add 1.1.1.12/32 dev dummy12
+ip ro add 1.1.1.13/32 dev dummy13
+ip ro add 1.1.1.14/32 dev dummy14
+ip ro add 1.1.1.15/32 dev dummy15
+ip ro add 1.1.1.16/32 dev dummy16
+ip ro add 1.1.1.17/32 dev dummy17
+ip ro add 1.1.1.18/32 dev dummy18
+ip ro add 1.1.1.19/32 dev dummy19
+
+ip next add id 1 via 1.1.1.1 dev dummy1
+ip next add id 2 via 1.1.1.2 dev dummy2
+ip next add id 3 via 1.1.1.3 dev dummy3
+ip next add id 4 via 1.1.1.4 dev dummy4
+ip next add id 5 via 1.1.1.5 dev dummy5
+ip next add id 6 via 1.1.1.6 dev dummy6
+ip next add id 7 via 1.1.1.7 dev dummy7
+ip next add id 8 via 1.1.1.8 dev dummy8
+ip next add id 9 via 1.1.1.9 dev dummy9
+ip next add id 10 via 1.1.1.10 dev dummy10
+ip next add id 11 via 1.1.1.11 dev dummy11
+ip next add id 12 via 1.1.1.12 dev dummy12
+ip next add id 13 via 1.1.1.13 dev dummy13
+ip next add id 14 via 1.1.1.14 dev dummy14
+ip next add id 15 via 1.1.1.15 dev dummy15
+ip next add id 16 via 1.1.1.16 dev dummy16
+ip next add id 17 via 1.1.1.17 dev dummy17
+ip next add id 18 via 1.1.1.18 dev dummy18
+ip next add id 19 via 1.1.1.19 dev dummy19
+
+ip next add id 1111 group 1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19
+ip next del id 1111
+
+Fixes: 430a049190de ("nexthop: Add support for nexthop groups")
+Signed-off-by: Stephen Worley <sworley@cumulusnetworks.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/nexthop.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -321,7 +321,9 @@ static size_t nh_nlmsg_size_single(struc
+ static size_t nh_nlmsg_size(struct nexthop *nh)
+ {
+-      size_t sz = nla_total_size(4);    /* NHA_ID */
++      size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
++
++      sz += nla_total_size(4); /* NHA_ID */
+       if (nh->is_group)
+               sz += nh_nlmsg_size_grp(nh);
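
The sizing arithmetic the fix changes can be checked from userspace, since the
netlink alignment macros and struct nhmsg are exported through the UAPI headers.
The sketch below is only an illustration: it assumes <linux/nexthop.h> from a
5.3+ kernel's installed headers and re-implements the kernel-internal
nla_total_size() helper locally.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>	/* NLMSG_ALIGN, NLA_ALIGN, NLA_HDRLEN */
#include <linux/nexthop.h>	/* struct nhmsg */

/* Userspace replica of the kernel's nla_total_size() helper. */
static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* Before the fix: only the NHA_ID attribute was budgeted. */
	size_t old_sz = nla_total_size(4);
	/* After the fix: the fixed-size struct nhmsg header is budgeted too. */
	size_t new_sz = NLMSG_ALIGN(sizeof(struct nhmsg)) + nla_total_size(4);

	printf("old estimate: %zu bytes, fixed estimate: %zu bytes\n",
	       old_sz, new_sz);
	return 0;
}

Without the NLMSG_ALIGN(sizeof(struct nhmsg)) term the allocation comes up a few
bytes short, so filling the skbuff fails at certain group sizes, which is what
the iproute2 reproducer above triggers.
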
diff --git a/queue-5.5/net-socionext-fix-possible-user-after-free-in-netsec_process_rx.patch b/queue-5.5/net-socionext-fix-possible-user-after-free-in-netsec_process_rx.patch
new file mode 100644 (file)
index 0000000..cfbf2f2
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 25 Jan 2020 12:48:50 +0100
+Subject: net: socionext: fix possible user-after-free in netsec_process_rx
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit b5e82e3c89c78ee0407ea8e8087af5519b6c7bae ]
+
+Fix a possible use-after-free in netsec_process_rx that can occur if
+the first packet is sent to the normal networking stack and the
+following one is dropped by the bpf program attached to the xdp hook.
+Fix the issue by defining the skb pointer inside the 'budget' loop.
+
+Fixes: ba2b232108d3c ("net: netsec: add XDP support")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/socionext/netsec.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -929,7 +929,6 @@ static int netsec_process_rx(struct nets
+       struct netsec_rx_pkt_info rx_info;
+       enum dma_data_direction dma_dir;
+       struct bpf_prog *xdp_prog;
+-      struct sk_buff *skb = NULL;
+       u16 xdp_xmit = 0;
+       u32 xdp_act = 0;
+       int done = 0;
+@@ -943,6 +942,7 @@ static int netsec_process_rx(struct nets
+               struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+               struct netsec_desc *desc = &dring->desc[idx];
+               struct page *page = virt_to_page(desc->addr);
++              struct sk_buff *skb = NULL;
+               u32 xdp_result = XDP_PASS;
+               u16 pkt_len, desc_len;
+               dma_addr_t dma_handle;
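
The underlying problem is a plain C scoping pitfall: a pointer declared outside
the loop keeps its value across iterations, so an iteration that never assigns
it (here, one whose packet the XDP program drops) can still act on a buffer that
a previous iteration already handed off. A simplified userspace analogue of why
moving the declaration into the loop body fixes it (hypothetical names, not the
netsec driver code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Buggy shape: declare the pointer once, outside the loop:
	 *     char *buf = NULL;
	 */
	for (int i = 0; i < 3; i++) {
		char *buf = NULL;	/* fixed shape: a fresh pointer each iteration */

		if (i != 1) {		/* iteration 1 "drops" its packet early */
			buf = malloc(16);
			if (!buf)
				return 1;
			snprintf(buf, 16, "pkt %d", i);
		}

		if (buf) {
			/* With the outer declaration, iteration 1 would reach this
			 * branch holding the already-delivered (and freed) pkt 0. */
			printf("delivering %s\n", buf);
			free(buf);
		}
	}
	return 0;
}
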
diff --git a/queue-5.5/net-socionext-fix-xdp_result-initialization-in-netsec_process_rx.patch b/queue-5.5/net-socionext-fix-xdp_result-initialization-in-netsec_process_rx.patch
new file mode 100644 (file)
index 0000000..90ca129
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 25 Jan 2020 12:48:51 +0100
+Subject: net: socionext: fix xdp_result initialization in netsec_process_rx
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 02758cb6dac31a2b4bd9e535cffbe718acd46404 ]
+
+Fix xdp_result initialization in netsec_process_rx in order to not
+increase rx counters if there is no bpf program attached to the xdp hook
+and napi_gro_receive returns GRO_DROP.
+
+Fixes: ba2b232108d3c ("net: netsec: add XDP support")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/socionext/netsec.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -942,8 +942,8 @@ static int netsec_process_rx(struct nets
+               struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+               struct netsec_desc *desc = &dring->desc[idx];
+               struct page *page = virt_to_page(desc->addr);
++              u32 xdp_result = NETSEC_XDP_PASS;
+               struct sk_buff *skb = NULL;
+-              u32 xdp_result = XDP_PASS;
+               u16 pkt_len, desc_len;
+               dma_addr_t dma_handle;
+               struct xdp_buff xdp;
diff --git a/queue-5.5/net_sched-ematch-reject-invalid-tcf_em_simple.patch b/queue-5.5/net_sched-ematch-reject-invalid-tcf_em_simple.patch
new file mode 100644 (file)
index 0000000..28cff84
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 24 Jan 2020 14:57:20 -0800
+Subject: net_sched: ematch: reject invalid TCF_EM_SIMPLE
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 55cd9f67f1e45de8517cdaab985fb8e56c0bc1d8 ]
+
+It is possible for malicious userspace to set TCF_EM_SIMPLE bit
+even for matches that should not have this bit set.
+
+This can fool two places using tcf_em_is_simple():
+
+1) tcf_em_tree_destroy() -> memory leak of em->data
+   if ops->destroy() is NULL
+
+2) tcf_em_tree_dump() wrongly report/leak 4 low-order bytes
+   of a kernel pointer.
+
+BUG: memory leak
+unreferenced object 0xffff888121850a40 (size 32):
+  comm "syz-executor927", pid 7193, jiffies 4294941655 (age 19.840s)
+  hex dump (first 32 bytes):
+    00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00  ................
+    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+  backtrace:
+    [<00000000f67036ea>] kmemleak_alloc_recursive include/linux/kmemleak.h:43 [inline]
+    [<00000000f67036ea>] slab_post_alloc_hook mm/slab.h:586 [inline]
+    [<00000000f67036ea>] slab_alloc mm/slab.c:3320 [inline]
+    [<00000000f67036ea>] __do_kmalloc mm/slab.c:3654 [inline]
+    [<00000000f67036ea>] __kmalloc_track_caller+0x165/0x300 mm/slab.c:3671
+    [<00000000fab0cc8e>] kmemdup+0x27/0x60 mm/util.c:127
+    [<00000000d9992e0a>] kmemdup include/linux/string.h:453 [inline]
+    [<00000000d9992e0a>] em_nbyte_change+0x5b/0x90 net/sched/em_nbyte.c:32
+    [<000000007e04f711>] tcf_em_validate net/sched/ematch.c:241 [inline]
+    [<000000007e04f711>] tcf_em_tree_validate net/sched/ematch.c:359 [inline]
+    [<000000007e04f711>] tcf_em_tree_validate+0x332/0x46f net/sched/ematch.c:300
+    [<000000007a769204>] basic_set_parms net/sched/cls_basic.c:157 [inline]
+    [<000000007a769204>] basic_change+0x1d7/0x5f0 net/sched/cls_basic.c:219
+    [<00000000e57a5997>] tc_new_tfilter+0x566/0xf70 net/sched/cls_api.c:2104
+    [<0000000074b68559>] rtnetlink_rcv_msg+0x3b2/0x4b0 net/core/rtnetlink.c:5415
+    [<00000000b7fe53fb>] netlink_rcv_skb+0x61/0x170 net/netlink/af_netlink.c:2477
+    [<00000000e83a40d0>] rtnetlink_rcv+0x1d/0x30 net/core/rtnetlink.c:5442
+    [<00000000d62ba933>] netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline]
+    [<00000000d62ba933>] netlink_unicast+0x223/0x310 net/netlink/af_netlink.c:1328
+    [<0000000088070f72>] netlink_sendmsg+0x2c0/0x570 net/netlink/af_netlink.c:1917
+    [<00000000f70b15ea>] sock_sendmsg_nosec net/socket.c:639 [inline]
+    [<00000000f70b15ea>] sock_sendmsg+0x54/0x70 net/socket.c:659
+    [<00000000ef95a9be>] ____sys_sendmsg+0x2d0/0x300 net/socket.c:2330
+    [<00000000b650f1ab>] ___sys_sendmsg+0x8a/0xd0 net/socket.c:2384
+    [<0000000055bfa74a>] __sys_sendmsg+0x80/0xf0 net/socket.c:2417
+    [<000000002abac183>] __do_sys_sendmsg net/socket.c:2426 [inline]
+    [<000000002abac183>] __se_sys_sendmsg net/socket.c:2424 [inline]
+    [<000000002abac183>] __x64_sys_sendmsg+0x23/0x30 net/socket.c:2424
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot+03c4738ed29d5d366ddf@syzkaller.appspotmail.com
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/ematch.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -238,6 +238,9 @@ static int tcf_em_validate(struct tcf_pr
+                       goto errout;
+               if (em->ops->change) {
++                      err = -EINVAL;
++                      if (em_hdr->flags & TCF_EM_SIMPLE)
++                              goto errout;
+                       err = em->ops->change(net, data, data_len, em);
+                       if (err < 0)
+                               goto errout;
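
The shape of the fix is a general validate-before-dispatch pattern: reject
user-supplied flag bits that do not apply to this kind of match before its
change() callback is allowed to run and allocate data. A small userspace sketch
of that pattern (hypothetical names, not the ematch code):

#include <errno.h>
#include <stdio.h>

#define EM_SIMPLE 0x1	/* stand-in for TCF_EM_SIMPLE: "data is an immediate value" */

struct match_ops {
	/* A non-NULL change() means the match stores heap-allocated data,
	 * so the immediate-value flag must not be accepted for it. */
	int (*change)(const void *data, int len);
};

static int dummy_change(const void *data, int len)
{
	(void)data;
	(void)len;
	return 0;
}

static int em_validate(const struct match_ops *ops, unsigned int flags,
		       const void *data, int len)
{
	if (ops->change) {
		if (flags & EM_SIMPLE)
			return -EINVAL;	/* reject before change() runs */
		return ops->change(data, len);
	}
	return 0;
}

int main(void)
{
	struct match_ops ops = { .change = dummy_change };

	printf("valid: %d, invalid flag: %d\n",
	       em_validate(&ops, 0, "x", 1),
	       em_validate(&ops, EM_SIMPLE, "x", 1));
	return 0;
}
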
diff --git a/queue-5.5/net_sched-fix-ops-bind_class-implementations.patch b/queue-5.5/net_sched-fix-ops-bind_class-implementations.patch
new file mode 100644 (file)
index 0000000..237026e
--- /dev/null
@@ -0,0 +1,364 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 23 Jan 2020 16:26:18 -0800
+Subject: net_sched: fix ops->bind_class() implementations
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 2e24cd755552350b94a7617617c6877b8cbcb701 ]
+
+The current implementations of ops->bind_class() merely
+search for the classid and update the class in the struct tcf_result,
+without invoking either cl_ops->bind_tcf() or
+cl_ops->unbind_tcf(). This breaks their design, as qdiscs
+like cbq use them to count filters too. This is why syzbot triggered
+the warning in cbq_destroy_class().
+
+In order to fix this, we have to call cl_ops->bind_tcf() and
+cl_ops->unbind_tcf() like the filter binding path. This patch does
+so by refactoring out two helper functions __tcf_bind_filter()
+and __tcf_unbind_filter(), which are lockless and accept a Qdisc
+pointer, then teaching each implementation to call them correctly.
+
+Note, we merely pass the Qdisc pointer as an opaque pointer to
+each filter; they only need to pass it down to the helper
+functions without understanding it at all.
+
+Fixes: 07d79fc7d94e ("net_sched: add reverse binding for tc class")
+Reported-and-tested-by: syzbot+0a0596220218fcb603a8@syzkaller.appspotmail.com
+Reported-and-tested-by: syzbot+63bdb6006961d8c917c6@syzkaller.appspotmail.com
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/pkt_cls.h     |   33 +++++++++++++++++++--------------
+ include/net/sch_generic.h |    3 ++-
+ net/sched/cls_basic.c     |   11 ++++++++---
+ net/sched/cls_bpf.c       |   11 ++++++++---
+ net/sched/cls_flower.c    |   11 ++++++++---
+ net/sched/cls_fw.c        |   11 ++++++++---
+ net/sched/cls_matchall.c  |   11 ++++++++---
+ net/sched/cls_route.c     |   11 ++++++++---
+ net/sched/cls_rsvp.h      |   11 ++++++++---
+ net/sched/cls_tcindex.c   |   11 ++++++++---
+ net/sched/cls_u32.c       |   11 ++++++++---
+ net/sched/sch_api.c       |    6 ++++--
+ 12 files changed, 97 insertions(+), 44 deletions(-)
+
+--- a/include/net/pkt_cls.h
++++ b/include/net/pkt_cls.h
+@@ -141,31 +141,38 @@ __cls_set_class(unsigned long *clp, unsi
+       return xchg(clp, cl);
+ }
+-static inline unsigned long
+-cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
++static inline void
++__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
+ {
+-      unsigned long old_cl;
++      unsigned long cl;
+-      sch_tree_lock(q);
+-      old_cl = __cls_set_class(clp, cl);
+-      sch_tree_unlock(q);
+-      return old_cl;
++      cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
++      cl = __cls_set_class(&r->class, cl);
++      if (cl)
++              q->ops->cl_ops->unbind_tcf(q, cl);
+ }
+ static inline void
+ tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
+ {
+       struct Qdisc *q = tp->chain->block->q;
+-      unsigned long cl;
+       /* Check q as it is not set for shared blocks. In that case,
+        * setting class is not supported.
+        */
+       if (!q)
+               return;
+-      cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
+-      cl = cls_set_class(q, &r->class, cl);
+-      if (cl)
++      sch_tree_lock(q);
++      __tcf_bind_filter(q, r, base);
++      sch_tree_unlock(q);
++}
++
++static inline void
++__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
++{
++      unsigned long cl;
++
++      if ((cl = __cls_set_class(&r->class, 0)) != 0)
+               q->ops->cl_ops->unbind_tcf(q, cl);
+ }
+@@ -173,12 +180,10 @@ static inline void
+ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
+ {
+       struct Qdisc *q = tp->chain->block->q;
+-      unsigned long cl;
+       if (!q)
+               return;
+-      if ((cl = __cls_set_class(&r->class, 0)) != 0)
+-              q->ops->cl_ops->unbind_tcf(q, cl);
++      __tcf_unbind_filter(q, r);
+ }
+ struct tcf_exts {
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -318,7 +318,8 @@ struct tcf_proto_ops {
+                                         void *type_data);
+       void                    (*hw_del)(struct tcf_proto *tp,
+                                         void *type_data);
+-      void                    (*bind_class)(void *, u32, unsigned long);
++      void                    (*bind_class)(void *, u32, unsigned long,
++                                            void *, unsigned long);
+       void *                  (*tmplt_create)(struct net *net,
+                                               struct tcf_chain *chain,
+                                               struct nlattr **tca,
+--- a/net/sched/cls_basic.c
++++ b/net/sched/cls_basic.c
+@@ -263,12 +263,17 @@ skip:
+       }
+ }
+-static void basic_bind_class(void *fh, u32 classid, unsigned long cl)
++static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                           unsigned long base)
+ {
+       struct basic_filter *f = fh;
+-      if (f && f->res.classid == classid)
+-              f->res.class = cl;
++      if (f && f->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &f->res, base);
++              else
++                      __tcf_unbind_filter(q, &f->res);
++      }
+ }
+ static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -631,12 +631,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
++static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
++                             void *q, unsigned long base)
+ {
+       struct cls_bpf_prog *prog = fh;
+-      if (prog && prog->res.classid == classid)
+-              prog->res.class = cl;
++      if (prog && prog->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &prog->res, base);
++              else
++                      __tcf_unbind_filter(q, &prog->res);
++      }
+ }
+ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2765,12 +2765,17 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+-static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
++static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                        unsigned long base)
+ {
+       struct cls_fl_filter *f = fh;
+-      if (f && f->res.classid == classid)
+-              f->res.class = cl;
++      if (f && f->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &f->res, base);
++              else
++                      __tcf_unbind_filter(q, &f->res);
++      }
+ }
+ static bool fl_delete_empty(struct tcf_proto *tp)
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -419,12 +419,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void fw_bind_class(void *fh, u32 classid, unsigned long cl)
++static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                        unsigned long base)
+ {
+       struct fw_filter *f = fh;
+-      if (f && f->res.classid == classid)
+-              f->res.class = cl;
++      if (f && f->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &f->res, base);
++              else
++                      __tcf_unbind_filter(q, &f->res);
++      }
+ }
+ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -393,12 +393,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
++static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                          unsigned long base)
+ {
+       struct cls_mall_head *head = fh;
+-      if (head && head->res.classid == classid)
+-              head->res.class = cl;
++      if (head && head->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &head->res, base);
++              else
++                      __tcf_unbind_filter(q, &head->res);
++      }
+ }
+ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
+--- a/net/sched/cls_route.c
++++ b/net/sched/cls_route.c
+@@ -641,12 +641,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void route4_bind_class(void *fh, u32 classid, unsigned long cl)
++static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                            unsigned long base)
+ {
+       struct route4_filter *f = fh;
+-      if (f && f->res.classid == classid)
+-              f->res.class = cl;
++      if (f && f->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &f->res, base);
++              else
++                      __tcf_unbind_filter(q, &f->res);
++      }
+ }
+ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
+--- a/net/sched/cls_rsvp.h
++++ b/net/sched/cls_rsvp.h
+@@ -738,12 +738,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl)
++static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                          unsigned long base)
+ {
+       struct rsvp_filter *f = fh;
+-      if (f && f->res.classid == classid)
+-              f->res.class = cl;
++      if (f && f->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &f->res, base);
++              else
++                      __tcf_unbind_filter(q, &f->res);
++      }
+ }
+ static struct tcf_proto_ops RSVP_OPS __read_mostly = {
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -654,12 +654,17 @@ nla_put_failure:
+       return -1;
+ }
+-static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
++static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
++                             void *q, unsigned long base)
+ {
+       struct tcindex_filter_result *r = fh;
+-      if (r && r->res.classid == classid)
+-              r->res.class = cl;
++      if (r && r->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &r->res, base);
++              else
++                      __tcf_unbind_filter(q, &r->res);
++      }
+ }
+ static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -1255,12 +1255,17 @@ static int u32_reoffload(struct tcf_prot
+       return 0;
+ }
+-static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
++static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
++                         unsigned long base)
+ {
+       struct tc_u_knode *n = fh;
+-      if (n && n->res.classid == classid)
+-              n->res.class = cl;
++      if (n && n->res.classid == classid) {
++              if (cl)
++                      __tcf_bind_filter(q, &n->res, base);
++              else
++                      __tcf_unbind_filter(q, &n->res);
++      }
+ }
+ static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1891,8 +1891,9 @@ static int tclass_del_notify(struct net
+ struct tcf_bind_args {
+       struct tcf_walker w;
+-      u32 classid;
++      unsigned long base;
+       unsigned long cl;
++      u32 classid;
+ };
+ static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
+@@ -1903,7 +1904,7 @@ static int tcf_node_bind(struct tcf_prot
+               struct Qdisc *q = tcf_block_q(tp->chain->block);
+               sch_tree_lock(q);
+-              tp->ops->bind_class(n, a->classid, a->cl);
++              tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
+               sch_tree_unlock(q);
+       }
+       return 0;
+@@ -1936,6 +1937,7 @@ static void tc_bind_tclass(struct Qdisc
+                       arg.w.fn = tcf_node_bind;
+                       arg.classid = clid;
++                      arg.base = cl;
+                       arg.cl = new_cl;
+                       tp->ops->walk(tp, &arg.w, true);
+               }
diff --git a/queue-5.5/net_sched-walk-through-all-child-classes-in-tc_bind_tclass.patch b/queue-5.5/net_sched-walk-through-all-child-classes-in-tc_bind_tclass.patch
new file mode 100644 (file)
index 0000000..45f9e8d
--- /dev/null
@@ -0,0 +1,112 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 23 Jan 2020 17:27:08 -0800
+Subject: net_sched: walk through all child classes in tc_bind_tclass()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 760d228e322e99cdf6d81b4b60a268b8f13cf67a ]
+
+In a complex TC class hierarchy like this:
+
+tc qdisc add dev eth0 root handle 1:0 cbq bandwidth 100Mbit         \
+  avpkt 1000 cell 8
+tc class add dev eth0 parent 1:0 classid 1:1 cbq bandwidth 100Mbit  \
+  rate 6Mbit weight 0.6Mbit prio 8 allot 1514 cell 8 maxburst 20      \
+  avpkt 1000 bounded
+
+tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 match ip \
+  sport 80 0xffff flowid 1:3
+tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 match ip \
+  sport 25 0xffff flowid 1:4
+
+tc class add dev eth0 parent 1:1 classid 1:3 cbq bandwidth 100Mbit  \
+  rate 5Mbit weight 0.5Mbit prio 5 allot 1514 cell 8 maxburst 20      \
+  avpkt 1000
+tc class add dev eth0 parent 1:1 classid 1:4 cbq bandwidth 100Mbit  \
+  rate 3Mbit weight 0.3Mbit prio 5 allot 1514 cell 8 maxburst 20      \
+  avpkt 1000
+
+where filters are installed on qdisc 1:0, so we can't merely
+search from class 1:1 when creating class 1:3 and class 1:4. We have
+to walk through all the child classes of the direct parent qdisc.
+Otherwise we would miss filters that need reverse binding.
+
+Fixes: 07d79fc7d94e ("net_sched: add reverse binding for tc class")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_api.c |   41 ++++++++++++++++++++++++++++++-----------
+ 1 file changed, 30 insertions(+), 11 deletions(-)
+
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1910,22 +1910,24 @@ static int tcf_node_bind(struct tcf_prot
+       return 0;
+ }
+-static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
+-                         unsigned long new_cl)
++struct tc_bind_class_args {
++      struct qdisc_walker w;
++      unsigned long new_cl;
++      u32 portid;
++      u32 clid;
++};
++
++static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
++                              struct qdisc_walker *w)
+ {
++      struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
+       const struct Qdisc_class_ops *cops = q->ops->cl_ops;
+       struct tcf_block *block;
+       struct tcf_chain *chain;
+-      unsigned long cl;
+-      cl = cops->find(q, portid);
+-      if (!cl)
+-              return;
+-      if (!cops->tcf_block)
+-              return;
+       block = cops->tcf_block(q, cl, NULL);
+       if (!block)
+-              return;
++              return 0;
+       for (chain = tcf_get_next_chain(block, NULL);
+            chain;
+            chain = tcf_get_next_chain(block, chain)) {
+@@ -1936,12 +1938,29 @@ static void tc_bind_tclass(struct Qdisc
+                       struct tcf_bind_args arg = {};
+                       arg.w.fn = tcf_node_bind;
+-                      arg.classid = clid;
++                      arg.classid = a->clid;
+                       arg.base = cl;
+-                      arg.cl = new_cl;
++                      arg.cl = a->new_cl;
+                       tp->ops->walk(tp, &arg.w, true);
+               }
+       }
++
++      return 0;
++}
++
++static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
++                         unsigned long new_cl)
++{
++      const struct Qdisc_class_ops *cops = q->ops->cl_ops;
++      struct tc_bind_class_args args = {};
++
++      if (!cops->tcf_block)
++              return;
++      args.portid = portid;
++      args.clid = clid;
++      args.new_cl = new_cl;
++      args.w.fn = tc_bind_class_walker;
++      q->ops->cl_ops->walk(q, &args.w);
+ }
+ #else
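
Both this patch and the previous one pass extra arguments through a fixed walker
callback signature with the same C idiom: embed the generic walker struct as the
first member of an argument struct, hand the core code a pointer to that member,
and cast it back to the containing struct inside the callback. A minimal
userspace sketch of the idiom (illustrative names only, not the sch_api code):

#include <stdio.h>

/* Generic walker interface: the callback only ever sees the base struct. */
struct walker {
	int (*fn)(int item, struct walker *w);
};

/* Caller-specific arguments, with the base struct as the FIRST member so a
 * pointer to it can be cast back to the containing struct in the callback. */
struct bind_args {
	struct walker w;
	int classid;
};

static int bind_walker(int item, struct walker *w)
{
	struct bind_args *a = (struct bind_args *)w;	/* safe: w is the first member */

	printf("item %d -> classid %d\n", item, a->classid);
	return 0;
}

static void walk_all(struct walker *w)
{
	for (int item = 1; item <= 3; item++)
		if (w->fn(item, w))
			break;
}

int main(void)
{
	struct bind_args args = { .w.fn = bind_walker, .classid = 42 };

	walk_all(&args.w);
	return 0;
}
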
diff --git a/queue-5.5/rxrpc-fix-use-after-free-in-rxrpc_receive_data.patch b/queue-5.5/rxrpc-fix-use-after-free-in-rxrpc_receive_data.patch
new file mode 100644 (file)
index 0000000..2b55309
--- /dev/null
@@ -0,0 +1,84 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: David Howells <dhowells@redhat.com>
+Date: Fri, 24 Jan 2020 23:08:04 +0000
+Subject: rxrpc: Fix use-after-free in rxrpc_receive_data()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 122d74fac84204b9a98263636f6f9a3b2e665639 ]
+
+The subpacket scanning loop in rxrpc_receive_data() references the
+subpacket count in the private data part of the sk_buff in the loop
+termination condition.  However, when the final subpacket is pasted into
+the ring buffer, the function is no longer has a ref on the sk_buff and
+should not be looking at sp->* any more.  This point is actually marked in
+the code when skb is cleared (but sp is not - which is an error).
+
+Fix this by caching sp->nr_subpackets in a local variable and using that
+instead.
+
+Also clear 'sp' to catch accesses after that point.
+
+This can show up as an oops in rxrpc_get_skb() if sp->nr_subpackets gets
+trashed by the sk_buff getting freed and reused in the meantime.
+
+Fixes: e2de6c404898 ("rxrpc: Use info in skbuff instead of reparsing a jumbo packet")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/input.c |   12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -413,7 +413,7 @@ static void rxrpc_input_data(struct rxrp
+ {
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       enum rxrpc_call_state state;
+-      unsigned int j;
++      unsigned int j, nr_subpackets;
+       rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
+       rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
+       bool immediate_ack = false, jumbo_bad = false;
+@@ -457,7 +457,8 @@ static void rxrpc_input_data(struct rxrp
+       call->ackr_prev_seq = seq0;
+       hard_ack = READ_ONCE(call->rx_hard_ack);
+-      if (sp->nr_subpackets > 1) {
++      nr_subpackets = sp->nr_subpackets;
++      if (nr_subpackets > 1) {
+               if (call->nr_jumbo_bad > 3) {
+                       ack = RXRPC_ACK_NOSPACE;
+                       ack_serial = serial;
+@@ -465,11 +466,11 @@ static void rxrpc_input_data(struct rxrp
+               }
+       }
+-      for (j = 0; j < sp->nr_subpackets; j++) {
++      for (j = 0; j < nr_subpackets; j++) {
+               rxrpc_serial_t serial = sp->hdr.serial + j;
+               rxrpc_seq_t seq = seq0 + j;
+               unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
+-              bool terminal = (j == sp->nr_subpackets - 1);
++              bool terminal = (j == nr_subpackets - 1);
+               bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
+               u8 flags, annotation = j;
+@@ -506,7 +507,7 @@ static void rxrpc_input_data(struct rxrp
+               }
+               if (call->rxtx_buffer[ix]) {
+-                      rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
++                      rxrpc_input_dup_data(call, seq, nr_subpackets > 1,
+                                            &jumbo_bad);
+                       if (ack != RXRPC_ACK_DUPLICATE) {
+                               ack = RXRPC_ACK_DUPLICATE;
+@@ -564,6 +565,7 @@ static void rxrpc_input_data(struct rxrp
+                        * ring.
+                        */
+                       skb = NULL;
++                      sp = NULL;
+               }
+               if (last) {
diff --git a/queue-5.5/series b/queue-5.5/series
index a3192c4110ac274e23165f541bec1060a38e2b49..7e779fcea022db9ea011e2785de886eb996a1e96 100644 (file)
@@ -33,3 +33,13 @@ ath9k-fix-storage-endpoint-lookup.patch
 brcmfmac-fix-interface-sanity-check.patch
 rtl8xxxu-fix-interface-sanity-check.patch
 zd1211rw-fix-storage-endpoint-lookup.patch
+mvneta-driver-disallow-xdp-program-on-hardware-buffer-management.patch
+net_sched-ematch-reject-invalid-tcf_em_simple.patch
+net_sched-fix-ops-bind_class-implementations.patch
+net_sched-walk-through-all-child-classes-in-tc_bind_tclass.patch
+net-socionext-fix-possible-user-after-free-in-netsec_process_rx.patch
+net-socionext-fix-xdp_result-initialization-in-netsec_process_rx.patch
+udp-segment-looped-gso-packets-correctly.patch
+mlxsw-minimal-fix-an-error-handling-path-in-mlxsw_m_port_create.patch
+net-include-struct-nhmsg-size-in-nh-nlmsg-size.patch
+rxrpc-fix-use-after-free-in-rxrpc_receive_data.patch
diff --git a/queue-5.5/udp-segment-looped-gso-packets-correctly.patch b/queue-5.5/udp-segment-looped-gso-packets-correctly.patch
new file mode 100644 (file)
index 0000000..fd5a551
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Thu 30 Jan 2020 11:16:14 AM CET
+From: Willem de Bruijn <willemb@google.com>
+Date: Mon, 27 Jan 2020 15:40:31 -0500
+Subject: udp: segment looped gso packets correctly
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 6cd021a58c18a1731f7e47f83e172c0c302d65e5 ]
+
+Multicast and broadcast packets can be looped from egress to ingress
+pre segmentation with dev_loopback_xmit. That function unconditionally
+sets ip_summed to CHECKSUM_UNNECESSARY.
+
+udp_rcv_segment segments gso packets in the udp rx path. Segmentation
+usually executes on egress, and does not expect packets of this type.
+__udp_gso_segment interprets !CHECKSUM_PARTIAL as CHECKSUM_NONE. But
+the offsets are not correct for gso_make_checksum.
+
+UDP GSO packets are of type CHECKSUM_PARTIAL, with their uh->check set
+to the correct pseudo header checksum. Reset ip_summed to this type.
+(CHECKSUM_PARTIAL is allowed on ingress, see comments in skbuff.h)
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Fixes: cf329aa42b66 ("udp: cope with UDP GRO packet misdirection")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/udp.h |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -476,6 +476,9 @@ static inline struct sk_buff *udp_rcv_se
+       if (!inet_get_convert_csum(sk))
+               features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++      if (skb->pkt_type == PACKET_LOOPBACK)
++              skb->ip_summed = CHECKSUM_PARTIAL;
++
+       /* the GSO CB lays after the UDP one, no need to save and restore any
+        * CB fragment
+        */