git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Dec 2019 06:39:47 +0000 (07:39 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Dec 2019 06:39:47 +0000 (07:39 +0100)
added patches:
gve-fix-the-queue-page-list-allocated-pages-count.patch
macvlan-schedule-bc_work-even-if-error.patch
mdio_bus-don-t-use-managed-reset-controller.patch
net-dsa-sja1105-fix-sja1105_parse_rgmii_delays.patch
net-macb-add-missed-tasklet_kill.patch
net-psample-fix-skb_over_panic.patch
net-sched-fix-tc-s-class-show-no-bstats-on-class-with-nolock-subqueues.patch
net-skmsg-fix-tls-1.3-crash-with-full-sk_msg.patch
net-tls-free-the-record-on-encryption-error.patch
net-tls-remove-the-dead-inplace_crypto-code.patch
net-tls-take-into-account-that-bpf_exec_tx_verdict-may-free-the-record.patch
net-tls-use-sg_next-to-walk-sg-entries.patch
openvswitch-drop-unneeded-bug_on-in-ovs_flow_cmd_build_info.patch
openvswitch-fix-flow-command-message-size.patch
openvswitch-remove-another-bug_on.patch
r8169-fix-jumbo-configuration-for-rtl8168evl.patch
r8169-fix-resume-on-cable-plug-in.patch
sctp-cache-netns-in-sctp_ep_common.patch
sctp-fix-memory-leak-in-sctp_sf_do_5_2_4_dupcook.patch
selftests-bpf-correct-perror-strings.patch
selftests-bpf-test_sockmap-handle-file-creation-failures-gracefully.patch
selftests-pmtu-use-oneline-for-ip-route-list-cache.patch
selftests-tls-add-a-test-for-fragmented-messages.patch
slip-fix-use-after-free-read-in-slip_open.patch
tipc-fix-link-name-length-check.patch
x86-fpu-don-t-cache-access-to-fpu_fpregs_owner_ctx.patch

27 files changed:
queue-5.4/gve-fix-the-queue-page-list-allocated-pages-count.patch [new file with mode: 0644]
queue-5.4/macvlan-schedule-bc_work-even-if-error.patch [new file with mode: 0644]
queue-5.4/mdio_bus-don-t-use-managed-reset-controller.patch [new file with mode: 0644]
queue-5.4/net-dsa-sja1105-fix-sja1105_parse_rgmii_delays.patch [new file with mode: 0644]
queue-5.4/net-macb-add-missed-tasklet_kill.patch [new file with mode: 0644]
queue-5.4/net-psample-fix-skb_over_panic.patch [new file with mode: 0644]
queue-5.4/net-sched-fix-tc-s-class-show-no-bstats-on-class-with-nolock-subqueues.patch [new file with mode: 0644]
queue-5.4/net-skmsg-fix-tls-1.3-crash-with-full-sk_msg.patch [new file with mode: 0644]
queue-5.4/net-tls-free-the-record-on-encryption-error.patch [new file with mode: 0644]
queue-5.4/net-tls-remove-the-dead-inplace_crypto-code.patch [new file with mode: 0644]
queue-5.4/net-tls-take-into-account-that-bpf_exec_tx_verdict-may-free-the-record.patch [new file with mode: 0644]
queue-5.4/net-tls-use-sg_next-to-walk-sg-entries.patch [new file with mode: 0644]
queue-5.4/openvswitch-drop-unneeded-bug_on-in-ovs_flow_cmd_build_info.patch [new file with mode: 0644]
queue-5.4/openvswitch-fix-flow-command-message-size.patch [new file with mode: 0644]
queue-5.4/openvswitch-remove-another-bug_on.patch [new file with mode: 0644]
queue-5.4/r8169-fix-jumbo-configuration-for-rtl8168evl.patch [new file with mode: 0644]
queue-5.4/r8169-fix-resume-on-cable-plug-in.patch [new file with mode: 0644]
queue-5.4/sctp-cache-netns-in-sctp_ep_common.patch [new file with mode: 0644]
queue-5.4/sctp-fix-memory-leak-in-sctp_sf_do_5_2_4_dupcook.patch [new file with mode: 0644]
queue-5.4/selftests-bpf-correct-perror-strings.patch [new file with mode: 0644]
queue-5.4/selftests-bpf-test_sockmap-handle-file-creation-failures-gracefully.patch [new file with mode: 0644]
queue-5.4/selftests-pmtu-use-oneline-for-ip-route-list-cache.patch [new file with mode: 0644]
queue-5.4/selftests-tls-add-a-test-for-fragmented-messages.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/slip-fix-use-after-free-read-in-slip_open.patch [new file with mode: 0644]
queue-5.4/tipc-fix-link-name-length-check.patch [new file with mode: 0644]
queue-5.4/x86-fpu-don-t-cache-access-to-fpu_fpregs_owner_ctx.patch [new file with mode: 0644]

diff --git a/queue-5.4/gve-fix-the-queue-page-list-allocated-pages-count.patch b/queue-5.4/gve-fix-the-queue-page-list-allocated-pages-count.patch
new file mode 100644 (file)
index 0000000..b1d4162
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jeroen de Borst <jeroendb@google.com>
+Date: Tue, 26 Nov 2019 15:36:19 -0800
+Subject: gve: Fix the queue page list allocated pages count
+
+From: Jeroen de Borst <jeroendb@google.com>
+
+[ Upstream commit a95069ecb7092d03b2ea1c39ee04514fe9627540 ]
+
+In gve_alloc_queue_page_list(), when a page allocation fails,
+qpl->num_entries will be wrong.  In this case priv->num_registered_pages
+can underflow in gve_free_queue_page_list(), causing subsequent calls
+to gve_alloc_queue_page_list() to fail.
+
+Fixes: f5cedc84a30d ("gve: Add transmit and receive support")
+Signed-off-by: Jeroen de Borst <jeroendb@google.com>
+Reviewed-by: Catherine Sullivan <csully@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(str
+       }
+       qpl->id = id;
+-      qpl->num_entries = pages;
++      qpl->num_entries = 0;
+       qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+       /* caller handles clean up */
+       if (!qpl->pages)
+@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(str
+               /* caller handles clean up */
+               if (err)
+                       return -ENOMEM;
++              qpl->num_entries++;
+       }
+       priv->num_registered_pages += pages;
diff --git a/queue-5.4/macvlan-schedule-bc_work-even-if-error.patch b/queue-5.4/macvlan-schedule-bc_work-even-if-error.patch
new file mode 100644 (file)
index 0000000..6828641
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Menglong Dong <dong.menglong@zte.com.cn>
+Date: Mon, 25 Nov 2019 16:58:09 +0800
+Subject: macvlan: schedule bc_work even if error
+
+From: Menglong Dong <dong.menglong@zte.com.cn>
+
+[ Upstream commit 1d7ea55668878bb350979c377fc72509dd6f5b21 ]
+
+While enqueueing a broadcast skb to port->bc_queue, schedule_work()
+is called to add port->bc_work, which processes the skbs in
+bc_queue, to "events" work queue. If port->bc_queue is full, the
+skb will be discarded and schedule_work(&port->bc_work) won't be
+called. However, if port->bc_queue is full and port->bc_work is not
+running or pending, port->bc_queue will keep full and schedule_work()
+won't be called any more, and all broadcast skbs to macvlan will be
+discarded. This case can happen:
+
+macvlan_process_broadcast() is the pending function of port->bc_work,
+it moves all the skbs in port->bc_queue to the queue "list", and
+processes the skbs in "list". During this, new skbs will keep being
+added to port->bc_queue in macvlan_broadcast_enqueue(), and
+port->bc_queue may already full when macvlan_process_broadcast()
+return. This may happen, especially when there are a lot of real-time
+threads and the process is preempted.
+
+Fix this by calling schedule_work(&port->bc_work) even if
+port->bc_work is full in macvlan_broadcast_enqueue().
+
+Fixes: 412ca1550cbe ("macvlan: Move broadcasts into a work queue")
+Signed-off-by: Menglong Dong <dong.menglong@zte.com.cn>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macvlan.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(st
+       }
+       spin_unlock(&port->bc_queue.lock);
++      schedule_work(&port->bc_work);
++
+       if (err)
+               goto free_nskb;
+-      schedule_work(&port->bc_work);
+       return;
+ free_nskb:
diff --git a/queue-5.4/mdio_bus-don-t-use-managed-reset-controller.patch b/queue-5.4/mdio_bus-don-t-use-managed-reset-controller.patch
new file mode 100644 (file)
index 0000000..324a980
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: David Bauer <mail@david-bauer.net>
+Date: Fri, 22 Nov 2019 22:44:51 +0100
+Subject: mdio_bus: don't use managed reset-controller
+
+From: David Bauer <mail@david-bauer.net>
+
+[ Upstream commit 32085f25d7b68404055f3525c780142fc72e543f ]
+
+Geert Uytterhoeven reported that using devm_reset_controller_get leads
+to a WARNING when probing a reset-controlled PHY. This is because the
+device devm_reset_controller_get gets supplied is not actually the
+one being probed.
+
+Acquire an unmanaged reset-control as well as free the reset_control on
+unregister to fix this.
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+CC: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David Bauer <mail@david-bauer.net>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/mdio_bus.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -62,8 +62,8 @@ static int mdiobus_register_reset(struct
+       struct reset_control *reset = NULL;
+       if (mdiodev->dev.of_node)
+-              reset = devm_reset_control_get_exclusive(&mdiodev->dev,
+-                                                       "phy");
++              reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
++                                                     "phy");
+       if (IS_ERR(reset)) {
+               if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
+                       reset = NULL;
+@@ -107,6 +107,8 @@ int mdiobus_unregister_device(struct mdi
+       if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
+               return -EINVAL;
++      reset_control_put(mdiodev->reset_ctrl);
++
+       mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
+       return 0;
diff --git a/queue-5.4/net-dsa-sja1105-fix-sja1105_parse_rgmii_delays.patch b/queue-5.4/net-dsa-sja1105-fix-sja1105_parse_rgmii_delays.patch
new file mode 100644 (file)
index 0000000..b283263
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Mon, 25 Nov 2019 12:43:51 +0100
+Subject: net: dsa: sja1105: fix sja1105_parse_rgmii_delays()
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+[ Upstream commit 9bca3a0a923fc3f0fb9e41391be1d0f291e86858 ]
+
+This function was using configuration of port 0 in devicetree for all ports.
+In case CPU port was not 0, the delay settings was ignored. This resulted not
+working communication between CPU and the switch.
+
+Fixes: f5b8631c293b ("net: dsa: sja1105: Error out if RGMII delays are requested in DT")
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/sja1105/sja1105_main.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -594,15 +594,15 @@ static int sja1105_parse_rgmii_delays(st
+       int i;
+       for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+-              if (ports->role == XMII_MAC)
++              if (ports[i].role == XMII_MAC)
+                       continue;
+-              if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+-                  ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
++              if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
++                  ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+                       priv->rgmii_rx_delay[i] = true;
+-              if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+-                  ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
++              if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
++                  ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+                       priv->rgmii_tx_delay[i] = true;
+               if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
diff --git a/queue-5.4/net-macb-add-missed-tasklet_kill.patch b/queue-5.4/net-macb-add-missed-tasklet_kill.patch
new file mode 100644 (file)
index 0000000..742a898
--- /dev/null
@@ -0,0 +1,30 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Chuhong Yuan <hslester96@gmail.com>
+Date: Thu, 28 Nov 2019 10:00:21 +0800
+Subject: net: macb: add missed tasklet_kill
+
+From: Chuhong Yuan <hslester96@gmail.com>
+
+[ Upstream commit 61183b056b49e2937ff92a1424291ba36a6f6d05 ]
+
+This driver forgets to kill tasklet in remove.
+Add the call to fix it.
+
+Fixes: 032dc41ba6e2 ("net: macb: Handle HRESP error")
+Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -4392,6 +4392,7 @@ static int macb_remove(struct platform_d
+               mdiobus_free(bp->mii_bus);
+               unregister_netdev(dev);
++              tasklet_kill(&bp->hresp_err_tasklet);
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_dont_use_autosuspend(&pdev->dev);
+               if (!pm_runtime_suspended(&pdev->dev)) {
diff --git a/queue-5.4/net-psample-fix-skb_over_panic.patch b/queue-5.4/net-psample-fix-skb_over_panic.patch
new file mode 100644 (file)
index 0000000..da4b465
--- /dev/null
@@ -0,0 +1,95 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Date: Wed, 27 Nov 2019 00:16:44 +0200
+Subject: net: psample: fix skb_over_panic
+
+From: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+
+[ Upstream commit 7eb9d7675c08937cd11d32b0b40442d4d731c5ee ]
+
+We need to calculate the skb size correctly otherwise we risk triggering
+skb_over_panic[1]. The issue is that data_len is added to the skb in a
+nl attribute, but we don't account for its header size (nlattr 4 bytes)
+and alignment. We account for it when calculating the total size in
+the > PSAMPLE_MAX_PACKET_SIZE comparison correctly, but not when
+allocating after that. The fix is simple - use nla_total_size() for
+data_len when allocating.
+
+To reproduce:
+ $ tc qdisc add dev eth1 clsact
+ $ tc filter add dev eth1 egress matchall action sample rate 1 group 1 trunc 129
+ $ mausezahn eth1 -b bcast -a rand -c 1 -p 129
+ < skb_over_panic BUG(), tail is 4 bytes past skb->end >
+
+[1] Trace:
+ [   50.459526][ T3480] skbuff: skb_over_panic: text:(____ptrval____) len:196 put:136 head:(____ptrval____) data:(____ptrval____) tail:0xc4 end:0xc0 dev:<NULL>
+ [   50.474339][ T3480] ------------[ cut here ]------------
+ [   50.481132][ T3480] kernel BUG at net/core/skbuff.c:108!
+ [   50.486059][ T3480] invalid opcode: 0000 [#1] PREEMPT SMP
+ [   50.489463][ T3480] CPU: 3 PID: 3480 Comm: mausezahn Not tainted 5.4.0-rc7 #108
+ [   50.492844][ T3480] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-2.fc30 04/01/2014
+ [   50.496551][ T3480] RIP: 0010:skb_panic+0x79/0x7b
+ [   50.498261][ T3480] Code: bc 00 00 00 41 57 4c 89 e6 48 c7 c7 90 29 9a 83 4c 8b 8b c0 00 00 00 50 8b 83 b8 00 00 00 50 ff b3 c8 00 00 00 e8 ae ef c0 fe <0f> 0b e8 2f df c8 fe 48 8b 55 08 44 89 f6 4c 89 e7 48 c7 c1 a0 22
+ [   50.504111][ T3480] RSP: 0018:ffffc90000447a10 EFLAGS: 00010282
+ [   50.505835][ T3480] RAX: 0000000000000087 RBX: ffff888039317d00 RCX: 0000000000000000
+ [   50.507900][ T3480] RDX: 0000000000000000 RSI: ffffffff812716e1 RDI: 00000000ffffffff
+ [   50.509820][ T3480] RBP: ffffc90000447a60 R08: 0000000000000001 R09: 0000000000000000
+ [   50.511735][ T3480] R10: ffffffff81d4f940 R11: 0000000000000000 R12: ffffffff834a22b0
+ [   50.513494][ T3480] R13: ffffffff82c10433 R14: 0000000000000088 R15: ffffffff838a8084
+ [   50.515222][ T3480] FS:  00007f3536462700(0000) GS:ffff88803eac0000(0000) knlGS:0000000000000000
+ [   50.517135][ T3480] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [   50.518583][ T3480] CR2: 0000000000442008 CR3: 000000003b222000 CR4: 00000000000006e0
+ [   50.520723][ T3480] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [   50.522709][ T3480] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ [   50.524450][ T3480] Call Trace:
+ [   50.525214][ T3480]  skb_put.cold+0x1b/0x1b
+ [   50.526171][ T3480]  psample_sample_packet+0x1d3/0x340
+ [   50.527307][ T3480]  tcf_sample_act+0x178/0x250
+ [   50.528339][ T3480]  tcf_action_exec+0xb1/0x190
+ [   50.529354][ T3480]  mall_classify+0x67/0x90
+ [   50.530332][ T3480]  tcf_classify+0x72/0x160
+ [   50.531286][ T3480]  __dev_queue_xmit+0x3db/0xd50
+ [   50.532327][ T3480]  dev_queue_xmit+0x18/0x20
+ [   50.533299][ T3480]  packet_sendmsg+0xee7/0x2090
+ [   50.534331][ T3480]  sock_sendmsg+0x54/0x70
+ [   50.535271][ T3480]  __sys_sendto+0x148/0x1f0
+ [   50.536252][ T3480]  ? tomoyo_file_ioctl+0x23/0x30
+ [   50.537334][ T3480]  ? ksys_ioctl+0x5e/0xb0
+ [   50.540068][ T3480]  __x64_sys_sendto+0x2a/0x30
+ [   50.542810][ T3480]  do_syscall_64+0x73/0x1f0
+ [   50.545383][ T3480]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ [   50.548477][ T3480] RIP: 0033:0x7f35357d6fb3
+ [   50.551020][ T3480] Code: 48 8b 0d 18 90 20 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 d3 20 00 00 75 13 49 89 ca b8 2c 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 34 c3 48 83 ec 08 e8 eb f6 ff ff 48 89 04 24
+ [   50.558547][ T3480] RSP: 002b:00007ffe0c7212c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
+ [   50.561870][ T3480] RAX: ffffffffffffffda RBX: 0000000001dac010 RCX: 00007f35357d6fb3
+ [   50.565142][ T3480] RDX: 0000000000000082 RSI: 0000000001dac2a2 RDI: 0000000000000003
+ [   50.568469][ T3480] RBP: 00007ffe0c7212f0 R08: 00007ffe0c7212d0 R09: 0000000000000014
+ [   50.571731][ T3480] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000082
+ [   50.574961][ T3480] R13: 0000000001dac2a2 R14: 0000000000000001 R15: 0000000000000003
+ [   50.578170][ T3480] Modules linked in: sch_ingress virtio_net
+ [   50.580976][ T3480] ---[ end trace 61a515626a595af6 ]---
+
+CC: Yotam Gigi <yotamg@mellanox.com>
+CC: Jiri Pirko <jiri@mellanox.com>
+CC: Jamal Hadi Salim <jhs@mojatatu.com>
+CC: Simon Horman <simon.horman@netronome.com>
+CC: Roopa Prabhu <roopa@cumulusnetworks.com>
+Fixes: 6ae0a6286171 ("net: Introduce psample, a new genetlink channel for packet sampling")
+Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/psample/psample.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -229,7 +229,7 @@ void psample_sample_packet(struct psampl
+               data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
+                           - NLA_ALIGNTO;
+-      nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
++      nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
+       if (unlikely(!nl_skb))
+               return;
diff --git a/queue-5.4/net-sched-fix-tc-s-class-show-no-bstats-on-class-with-nolock-subqueues.patch b/queue-5.4/net-sched-fix-tc-s-class-show-no-bstats-on-class-with-nolock-subqueues.patch
new file mode 100644 (file)
index 0000000..0070441
--- /dev/null
@@ -0,0 +1,83 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Dust Li <dust.li@linux.alibaba.com>
+Date: Thu, 28 Nov 2019 14:29:09 +0800
+Subject: net: sched: fix `tc -s class show` no bstats on class with nolock subqueues
+
+From: Dust Li <dust.li@linux.alibaba.com>
+
+[ Upstream commit 14e54ab9143fa60794d13ea0a66c792a2046a8f3 ]
+
+When a classful qdisc's child qdisc has set the flag
+TCQ_F_CPUSTATS (pfifo_fast for example), the child qdisc's
+cpu_bstats should be passed to gnet_stats_copy_basic(),
+but many classful qdisc didn't do that. As a result,
+`tc -s class show dev DEV` always return 0 for bytes and
+packets in this case.
+
+Pass the child qdisc's cpu_bstats to gnet_stats_copy_basic()
+to fix this issue.
+
+The qstats also has this problem, but it has been fixed
+in 5dd431b6b9 ("net: sched: introduce and use qstats read...")
+and bstats still remains buggy.
+
+Fixes: 22e0f8b9322c ("net: sched: make bstats per cpu and estimator RCU safe")
+Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
+Signed-off-by: Tony Lu <tonylu@linux.alibaba.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_mq.c     |    3 ++-
+ net/sched/sch_mqprio.c |    4 ++--
+ net/sched/sch_multiq.c |    2 +-
+ net/sched/sch_prio.c   |    2 +-
+ 4 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -245,7 +245,8 @@ static int mq_dump_class_stats(struct Qd
+       struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+       sch = dev_queue->qdisc_sleeping;
+-      if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
++      if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
++                                &sch->bstats) < 0 ||
+           qdisc_qstats_copy(d, sch) < 0)
+               return -1;
+       return 0;
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -557,8 +557,8 @@ static int mqprio_dump_class_stats(struc
+               struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+               sch = dev_queue->qdisc_sleeping;
+-              if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+-                                        d, NULL, &sch->bstats) < 0 ||
++              if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
++                                        sch->cpu_bstats, &sch->bstats) < 0 ||
+                   qdisc_qstats_copy(d, sch) < 0)
+                       return -1;
+       }
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -339,7 +339,7 @@ static int multiq_dump_class_stats(struc
+       cl_q = q->queues[cl - 1];
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+-                                d, NULL, &cl_q->bstats) < 0 ||
++                                d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
+           qdisc_qstats_copy(d, cl_q) < 0)
+               return -1;
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -356,7 +356,7 @@ static int prio_dump_class_stats(struct
+       cl_q = q->queues[cl - 1];
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+-                                d, NULL, &cl_q->bstats) < 0 ||
++                                d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
+           qdisc_qstats_copy(d, cl_q) < 0)
+               return -1;
diff --git a/queue-5.4/net-skmsg-fix-tls-1.3-crash-with-full-sk_msg.patch b/queue-5.4/net-skmsg-fix-tls-1.3-crash-with-full-sk_msg.patch
new file mode 100644 (file)
index 0000000..029e5d1
--- /dev/null
@@ -0,0 +1,214 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:41 -0800
+Subject: net: skmsg: fix TLS 1.3 crash with full sk_msg
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 031097d9e079e40dce401031d1012e83d80eaf01 ]
+
+TLS 1.3 started using the entry at the end of the SG array
+for chaining-in the single byte content type entry. This mostly
+works:
+
+[ E E E E E E . . ]
+  ^           ^
+   start       end
+
+                 E < content type
+               /
+[ E E E E E E C . ]
+  ^           ^
+   start       end
+
+(Where E denotes a populated SG entry; C denotes a chaining entry.)
+
+If the array is full, however, the end will point to the start:
+
+[ E E E E E E E E ]
+  ^
+   start
+   end
+
+And we end up overwriting the start:
+
+    E < content type
+   /
+[ C E E E E E E E ]
+  ^
+   start
+   end
+
+The sg array is supposed to be a circular buffer with start and
+end markers pointing anywhere. In case where start > end
+(i.e. the circular buffer has "wrapped") there is an extra entry
+reserved at the end to chain the two halves together.
+
+[ E E E E E E . . l ]
+
+(Where l is the reserved entry for "looping" back to front.
+
+As suggested by John, let's reserve another entry for chaining
+SG entries after the main circular buffer. Note that this entry
+has to be pointed to by the end entry so its position is not fixed.
+
+Examples of full messages:
+
+[ E E E E E E E E . l ]
+  ^               ^
+   start           end
+
+   <---------------.
+[ E E . E E E E E E l ]
+      ^ ^
+   end   start
+
+Now the end will always point to an unused entry, so TLS 1.3
+can always use it.
+
+Fixes: 130b392c6cd6 ("net: tls: Add tls 1.3 support")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skmsg.h |   26 +++++++++++++-------------
+ net/core/filter.c     |    8 ++++----
+ net/core/skmsg.c      |    2 +-
+ net/ipv4/tcp_bpf.c    |    2 +-
+ 4 files changed, 19 insertions(+), 19 deletions(-)
+
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -14,6 +14,7 @@
+ #include <net/strparser.h>
+ #define MAX_MSG_FRAGS                 MAX_SKB_FRAGS
++#define NR_MSG_FRAG_IDS                       (MAX_MSG_FRAGS + 1)
+ enum __sk_action {
+       __SK_DROP = 0,
+@@ -29,11 +30,13 @@ struct sk_msg_sg {
+       u32                             size;
+       u32                             copybreak;
+       bool                            copy[MAX_MSG_FRAGS];
+-      /* The extra element is used for chaining the front and sections when
+-       * the list becomes partitioned (e.g. end < start). The crypto APIs
+-       * require the chaining.
++      /* The extra two elements:
++       * 1) used for chaining the front and sections when the list becomes
++       *    partitioned (e.g. end < start). The crypto APIs require the
++       *    chaining;
++       * 2) to chain tailer SG entries after the message.
+        */
+-      struct scatterlist              data[MAX_MSG_FRAGS + 1];
++      struct scatterlist              data[MAX_MSG_FRAGS + 2];
+ };
+ /* UAPI in filter.c depends on struct sk_msg_sg being first element. */
+@@ -141,13 +144,13 @@ static inline void sk_msg_apply_bytes(st
+ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
+ {
+-      return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
++      return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
+ }
+ #define sk_msg_iter_var_prev(var)                     \
+       do {                                            \
+               if (var == 0)                           \
+-                      var = MAX_MSG_FRAGS - 1;        \
++                      var = NR_MSG_FRAG_IDS - 1;      \
+               else                                    \
+                       var--;                          \
+       } while (0)
+@@ -155,7 +158,7 @@ static inline u32 sk_msg_iter_dist(u32 s
+ #define sk_msg_iter_var_next(var)                     \
+       do {                                            \
+               var++;                                  \
+-              if (var == MAX_MSG_FRAGS)               \
++              if (var == NR_MSG_FRAG_IDS)             \
+                       var = 0;                        \
+       } while (0)
+@@ -172,9 +175,9 @@ static inline void sk_msg_clear_meta(str
+ static inline void sk_msg_init(struct sk_msg *msg)
+ {
+-      BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
++      BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
+       memset(msg, 0, sizeof(*msg));
+-      sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
++      sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
+ }
+ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+@@ -195,14 +198,11 @@ static inline void sk_msg_xfer_full(stru
+ static inline bool sk_msg_full(const struct sk_msg *msg)
+ {
+-      return (msg->sg.end == msg->sg.start) && msg->sg.size;
++      return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
+ }
+ static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
+ {
+-      if (sk_msg_full(msg))
+-              return MAX_MSG_FRAGS;
+-
+       return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
+ }
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2299,7 +2299,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_
+       WARN_ON_ONCE(last_sge == first_sge);
+       shift = last_sge > first_sge ?
+               last_sge - first_sge - 1 :
+-              MAX_SKB_FRAGS - first_sge + last_sge - 1;
++              NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
+       if (!shift)
+               goto out;
+@@ -2308,8 +2308,8 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_
+       do {
+               u32 move_from;
+-              if (i + shift >= MAX_MSG_FRAGS)
+-                      move_from = i + shift - MAX_MSG_FRAGS;
++              if (i + shift >= NR_MSG_FRAG_IDS)
++                      move_from = i + shift - NR_MSG_FRAG_IDS;
+               else
+                       move_from = i + shift;
+               if (move_from == msg->sg.end)
+@@ -2323,7 +2323,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_
+       } while (1);
+       msg->sg.end = msg->sg.end - shift > msg->sg.end ?
+-                    msg->sg.end - shift + MAX_MSG_FRAGS :
++                    msg->sg.end - shift + NR_MSG_FRAG_IDS :
+                     msg->sg.end - shift;
+ out:
+       msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -421,7 +421,7 @@ static int sk_psock_skb_ingress(struct s
+       copied = skb->len;
+       msg->sg.start = 0;
+       msg->sg.size = copied;
+-      msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
++      msg->sg.end = num_sge;
+       msg->skb = skb;
+       sk_psock_queue_msg(psock, msg);
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir)
+ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+                               struct sk_msg *msg, int *copied, int flags)
+ {
+-      bool cork = false, enospc = msg->sg.start == msg->sg.end;
++      bool cork = false, enospc = sk_msg_full(msg);
+       struct sock *sk_redir;
+       u32 tosend, delta = 0;
+       int ret;
diff --git a/queue-5.4/net-tls-free-the-record-on-encryption-error.patch b/queue-5.4/net-tls-free-the-record-on-encryption-error.patch
new file mode 100644 (file)
index 0000000..fcbb478
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:40 -0800
+Subject: net/tls: free the record on encryption error
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit d10523d0b3d78153ee58d19853ced26c9004c8c4 ]
+
+When tls_do_encryption() fails the SG lists are left with the
+SG_END and SG_CHAIN marks in place. One could hope that once
+encryption fails we will never see the record again, but that
+is in fact not true. Commit d3b18ad31f93 ("tls: add bpf support
+to sk_msg handling") added special handling to ENOMEM and ENOSPC
+errors which mean we may see the same record re-submitted.
+
+As suggested by John free the record, the BPF code is already
+doing just that.
+
+Reported-by: syzbot+df0d4ec12332661dd1f9@syzkaller.appspotmail.com
+Fixes: d3b18ad31f93 ("tls: add bpf support to sk_msg handling")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -766,8 +766,14 @@ static int bpf_exec_tx_verdict(struct sk
+       policy = !(flags & MSG_SENDPAGE_NOPOLICY);
+       psock = sk_psock_get(sk);
+-      if (!psock || !policy)
+-              return tls_push_record(sk, flags, record_type);
++      if (!psock || !policy) {
++              err = tls_push_record(sk, flags, record_type);
++              if (err) {
++                      *copied -= sk_msg_free(sk, msg);
++                      tls_free_open_rec(sk);
++              }
++              return err;
++      }
+ more_data:
+       enospc = sk_msg_full(msg);
+       if (psock->eval == __SK_NONE) {
diff --git a/queue-5.4/net-tls-remove-the-dead-inplace_crypto-code.patch b/queue-5.4/net-tls-remove-the-dead-inplace_crypto-code.patch
new file mode 100644 (file)
index 0000000..43cd047
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:43 -0800
+Subject: net/tls: remove the dead inplace_crypto code
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 9e5ffed37df68d0ccfb2fdc528609e23a1e70ebe ]
+
+Looks like when BPF support was added by commit d3b18ad31f93
+("tls: add bpf support to sk_msg handling") and
+commit d829e9c4112b ("tls: convert to generic sk_msg interface")
+it broke/removed the support for in-place crypto as added by
+commit 4e6d47206c32 ("tls: Add support for inplace records
+encryption").
+
+The inplace_crypto member of struct tls_rec is dead, inited
+to zero, and sometimes set to zero again. It used to be
+set to 1 when record was allocated, but the skmsg code doesn't
+seem to have been written with the idea of in-place crypto
+in mind.
+
+Since non trivial effort is required to bring the feature back
+and we don't really have the HW to measure the benefit just
+remove the left over support for now to avoid confusing readers.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tls.h |    1 -
+ net/tls/tls_sw.c  |    6 +-----
+ 2 files changed, 1 insertion(+), 6 deletions(-)
+
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -122,7 +122,6 @@ struct tls_rec {
+       struct list_head list;
+       int tx_ready;
+       int tx_flags;
+-      int inplace_crypto;
+       struct sk_msg msg_plaintext;
+       struct sk_msg msg_encrypted;
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -705,8 +705,7 @@ static int tls_push_record(struct sock *
+       }
+       i = msg_pl->sg.start;
+-      sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
+-               &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
++      sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
+       i = msg_en->sg.end;
+       sk_msg_iter_var_prev(i);
+@@ -971,8 +970,6 @@ alloc_encrypted:
+                       if (ret)
+                               goto fallback_to_reg_send;
+-                      rec->inplace_crypto = 0;
+-
+                       num_zc++;
+                       copied += try_to_copy;
+@@ -1171,7 +1168,6 @@ alloc_payload:
+               tls_ctx->pending_open_record_frags = true;
+               if (full_record || eor || sk_msg_full(msg_pl)) {
+-                      rec->inplace_crypto = 0;
+                       ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+                                                 record_type, &copied, flags);
+                       if (ret) {
diff --git a/queue-5.4/net-tls-take-into-account-that-bpf_exec_tx_verdict-may-free-the-record.patch b/queue-5.4/net-tls-take-into-account-that-bpf_exec_tx_verdict-may-free-the-record.patch
new file mode 100644 (file)
index 0000000..3e5534d
--- /dev/null
@@ -0,0 +1,65 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:39 -0800
+Subject: net/tls: take into account that bpf_exec_tx_verdict() may free the record
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit c329ef9684de9517d82af5b4758c9e1b64a8a11a ]
+
+bpf_exec_tx_verdict() may free the record if tls_push_record()
+fails, or if the entire record got consumed by BPF. Re-check
+ctx->open_rec before touching the data.
+
+Fixes: d3b18ad31f93 ("tls: add bpf support to sk_msg handling")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_sw.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -979,7 +979,7 @@ alloc_encrypted:
+                                       num_async++;
+                               else if (ret == -ENOMEM)
+                                       goto wait_for_memory;
+-                              else if (ret == -ENOSPC)
++                              else if (ctx->open_rec && ret == -ENOSPC)
+                                       goto rollback_iter;
+                               else if (ret != -EAGAIN)
+                                       goto send_end;
+@@ -1048,11 +1048,12 @@ wait_for_memory:
+               ret = sk_stream_wait_memory(sk, &timeo);
+               if (ret) {
+ trim_sgl:
+-                      tls_trim_both_msgs(sk, orig_size);
++                      if (ctx->open_rec)
++                              tls_trim_both_msgs(sk, orig_size);
+                       goto send_end;
+               }
+-              if (msg_en->sg.size < required_size)
++              if (ctx->open_rec && msg_en->sg.size < required_size)
+                       goto alloc_encrypted;
+       }
+@@ -1185,11 +1186,13 @@ wait_for_sndbuf:
+ wait_for_memory:
+               ret = sk_stream_wait_memory(sk, &timeo);
+               if (ret) {
+-                      tls_trim_both_msgs(sk, msg_pl->sg.size);
++                      if (ctx->open_rec)
++                              tls_trim_both_msgs(sk, msg_pl->sg.size);
+                       goto sendpage_end;
+               }
+-              goto alloc_payload;
++              if (ctx->open_rec)
++                      goto alloc_payload;
+       }
+       if (num_async) {
diff --git a/queue-5.4/net-tls-use-sg_next-to-walk-sg-entries.patch b/queue-5.4/net-tls-use-sg_next-to-walk-sg-entries.patch
new file mode 100644 (file)
index 0000000..f939952
--- /dev/null
@@ -0,0 +1,78 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:44 -0800
+Subject: net/tls: use sg_next() to walk sg entries
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit c5daa6cccdc2f94aca2c9b3fa5f94e4469997293 ]
+
+Partially sent record cleanup path increments an SG entry
+directly instead of using sg_next(). This should not be a
+problem today, as encrypted messages should be always
+allocated as arrays. But given this is a cleanup path it's
+easy to miss was this ever to change. Use sg_next(), and
+simplify the code.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/tls.h  |    2 +-
+ net/tls/tls_main.c |   13 ++-----------
+ net/tls/tls_sw.c   |    3 ++-
+ 3 files changed, 5 insertions(+), 13 deletions(-)
+
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -395,7 +395,7 @@ int tls_push_sg(struct sock *sk, struct
+               int flags);
+ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+                           int flags);
+-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
++void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
+ static inline struct tls_msg *tls_msg(struct sk_buff *skb)
+ {
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -209,24 +209,15 @@ int tls_push_partial_record(struct sock
+       return tls_push_sg(sk, ctx, sg, offset, flags);
+ }
+-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
++void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+ {
+       struct scatterlist *sg;
+-      sg = ctx->partially_sent_record;
+-      if (!sg)
+-              return false;
+-
+-      while (1) {
++      for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
+               put_page(sg_page(sg));
+               sk_mem_uncharge(sk, sg->length);
+-
+-              if (sg_is_last(sg))
+-                      break;
+-              sg++;
+       }
+       ctx->partially_sent_record = NULL;
+-      return true;
+ }
+ static void tls_write_space(struct sock *sk)
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2084,7 +2084,8 @@ void tls_sw_release_resources_tx(struct
+       /* Free up un-sent records in tx_list. First, free
+        * the partially sent record if any at head of tx_list.
+        */
+-      if (tls_free_partial_record(sk, tls_ctx)) {
++      if (tls_ctx->partially_sent_record) {
++              tls_free_partial_record(sk, tls_ctx);
+               rec = list_first_entry(&ctx->tx_list,
+                                      struct tls_rec, list);
+               list_del(&rec->list);
diff --git a/queue-5.4/openvswitch-drop-unneeded-bug_on-in-ovs_flow_cmd_build_info.patch b/queue-5.4/openvswitch-drop-unneeded-bug_on-in-ovs_flow_cmd_build_info.patch
new file mode 100644 (file)
index 0000000..7b8dacb
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sun, 1 Dec 2019 18:41:24 +0100
+Subject: openvswitch: drop unneeded BUG_ON() in ovs_flow_cmd_build_info()
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 8ffeb03fbba3b599690b361467bfd2373e8c450f ]
+
+All the callers of ovs_flow_cmd_build_info() already deal with
+error return code correctly, so we can handle the error condition
+in a more gracefull way. Still dump a warning to preserve
+debuggability.
+
+v1 -> v2:
+ - clarify the commit message
+ - clean the skb and report the error (DaveM)
+
+Fixes: ccb1352e76cf ("net: Add Open vSwitch kernel components.")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/datapath.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -886,7 +886,10 @@ static struct sk_buff *ovs_flow_cmd_buil
+       retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
+                                       info->snd_portid, info->snd_seq, 0,
+                                       cmd, ufid_flags);
+-      BUG_ON(retval < 0);
++      if (WARN_ON_ONCE(retval < 0)) {
++              kfree_skb(skb);
++              skb = ERR_PTR(retval);
++      }
+       return skb;
+ }
diff --git a/queue-5.4/openvswitch-fix-flow-command-message-size.patch b/queue-5.4/openvswitch-fix-flow-command-message-size.patch
new file mode 100644 (file)
index 0000000..28b775e
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 26 Nov 2019 12:55:50 +0100
+Subject: openvswitch: fix flow command message size
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 4e81c0b3fa93d07653e2415fa71656b080a112fd ]
+
+When user-space sets the OVS_UFID_F_OMIT_* flags, and the relevant
+flow has no UFID, we can exceed the computed size, as
+ovs_nla_put_identifier() will always dump an OVS_FLOW_ATTR_KEY
+attribute.
+Take the above in account when computing the flow command message
+size.
+
+Fixes: 74ed7ab9264c ("openvswitch: Add support for unique flow IDs.")
+Reported-by: Qi Jun Ding <qding@redhat.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/datapath.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -704,9 +704,13 @@ static size_t ovs_flow_cmd_msg_size(cons
+ {
+       size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
+-      /* OVS_FLOW_ATTR_UFID */
++      /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
++       * see ovs_nla_put_identifier()
++       */
+       if (sfid && ovs_identifier_is_ufid(sfid))
+               len += nla_total_size(sfid->ufid_len);
++      else
++              len += nla_total_size(ovs_key_attr_size());
+       /* OVS_FLOW_ATTR_KEY */
+       if (!sfid || should_fill_key(sfid, ufid_flags))
diff --git a/queue-5.4/openvswitch-remove-another-bug_on.patch b/queue-5.4/openvswitch-remove-another-bug_on.patch
new file mode 100644 (file)
index 0000000..6b96478
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sun, 1 Dec 2019 18:41:25 +0100
+Subject: openvswitch: remove another BUG_ON()
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 8a574f86652a4540a2433946ba826ccb87f398cc ]
+
+If we can't build the flow del notification, we can simply delete
+the flow, no need to crash the kernel. Still keep a WARN_ON to
+preserve debuggability.
+
+Note: the BUG_ON() predates the Fixes tag, but this change
+can be applied only after the mentioned commit.
+
+v1 -> v2:
+ - do not leak an skb on error
+
+Fixes: aed067783e50 ("openvswitch: Minimize ovs_flow_cmd_del critical section.")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/datapath.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1353,7 +1353,10 @@ static int ovs_flow_cmd_del(struct sk_bu
+                                                    OVS_FLOW_CMD_DEL,
+                                                    ufid_flags);
+                       rcu_read_unlock();
+-                      BUG_ON(err < 0);
++                      if (WARN_ON_ONCE(err < 0)) {
++                              kfree_skb(reply);
++                              goto out_free;
++                      }
+                       ovs_notify(&dp_flow_genl_family, reply, info);
+               } else {
+@@ -1361,6 +1364,7 @@ static int ovs_flow_cmd_del(struct sk_bu
+               }
+       }
++out_free:
+       ovs_flow_free(flow, true);
+       return 0;
+ unlock:
diff --git a/queue-5.4/r8169-fix-jumbo-configuration-for-rtl8168evl.patch b/queue-5.4/r8169-fix-jumbo-configuration-for-rtl8168evl.patch
new file mode 100644 (file)
index 0000000..532ff76
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Sun, 1 Dec 2019 10:27:14 +0100
+Subject: r8169: fix jumbo configuration for RTL8168evl
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 14012c9f3bb922b9e0751ba43d15cc580a6049bf ]
+
+Alan reported [0] that network is broken since the referenced commit
+when using jumbo frames. This commit isn't wrong, it just revealed
+another issue that has been existing before. According to the vendor
+driver the RTL8168e-specific jumbo config doesn't apply for RTL8168evl.
+
+[0] https://lkml.org/lkml/2019/11/30/119
+
+Fixes: 4ebcb113edcc ("r8169: fix jumbo packet handling on resume from suspend")
+Reported-by: Alan J. Wylie <alan@wylie.me.uk>
+Tested-by: Alan J. Wylie <alan@wylie.me.uk>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4118,7 +4118,7 @@ static void rtl_hw_jumbo_enable(struct r
+       case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+               r8168dp_hw_jumbo_enable(tp);
+               break;
+-      case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
++      case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+               r8168e_hw_jumbo_enable(tp);
+               break;
+       default:
diff --git a/queue-5.4/r8169-fix-resume-on-cable-plug-in.patch b/queue-5.4/r8169-fix-resume-on-cable-plug-in.patch
new file mode 100644 (file)
index 0000000..f0516e4
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Sun, 1 Dec 2019 10:39:56 +0100
+Subject: r8169: fix resume on cable plug-in
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 398fd408ccfb5e44b1cbe73a209d2281d3efa83c ]
+
+It was reported [0] that network doesn't wake up on cable plug-in with
+certain chip versions. Reason is that on these chip versions the PHY
+doesn't detect cable plug-in when being in power-down mode. So prevent
+the PHY from powering down if WoL is enabled.
+
+[0] https://bugzilla.kernel.org/show_bug.cgi?id=202103
+
+Fixes: 95fb8bb3181b ("net: phy: force phy suspend when calling phy_stop")
+Reported-by: jhdskag3 <jhdskag3@tutanota.com>
+Tested-by: jhdskag3 <jhdskag3@tutanota.com>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1516,6 +1516,7 @@ static void __rtl8169_set_wol(struct rtl
+       rtl_lock_config_regs(tp);
+       device_set_wakeup_enable(tp_to_dev(tp), wolopts);
++      tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
+ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/queue-5.4/sctp-cache-netns-in-sctp_ep_common.patch b/queue-5.4/sctp-cache-netns-in-sctp_ep_common.patch
new file mode 100644 (file)
index 0000000..b57a2af
--- /dev/null
@@ -0,0 +1,110 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sat, 23 Nov 2019 11:56:49 +0800
+Subject: sctp: cache netns in sctp_ep_common
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 312434617cb16be5166316cf9d08ba760b1042a1 ]
+
+This patch is to fix a data-race reported by syzbot:
+
+  BUG: KCSAN: data-race in sctp_assoc_migrate / sctp_hash_obj
+
+  write to 0xffff8880b67c0020 of 8 bytes by task 18908 on cpu 1:
+    sctp_assoc_migrate+0x1a6/0x290 net/sctp/associola.c:1091
+    sctp_sock_migrate+0x8aa/0x9b0 net/sctp/socket.c:9465
+    sctp_accept+0x3c8/0x470 net/sctp/socket.c:4916
+    inet_accept+0x7f/0x360 net/ipv4/af_inet.c:734
+    __sys_accept4+0x224/0x430 net/socket.c:1754
+    __do_sys_accept net/socket.c:1795 [inline]
+    __se_sys_accept net/socket.c:1792 [inline]
+    __x64_sys_accept+0x4e/0x60 net/socket.c:1792
+    do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+    entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+  read to 0xffff8880b67c0020 of 8 bytes by task 12003 on cpu 0:
+    sctp_hash_obj+0x4f/0x2d0 net/sctp/input.c:894
+    rht_key_get_hash include/linux/rhashtable.h:133 [inline]
+    rht_key_hashfn include/linux/rhashtable.h:159 [inline]
+    rht_head_hashfn include/linux/rhashtable.h:174 [inline]
+    head_hashfn lib/rhashtable.c:41 [inline]
+    rhashtable_rehash_one lib/rhashtable.c:245 [inline]
+    rhashtable_rehash_chain lib/rhashtable.c:276 [inline]
+    rhashtable_rehash_table lib/rhashtable.c:316 [inline]
+    rht_deferred_worker+0x468/0xab0 lib/rhashtable.c:420
+    process_one_work+0x3d4/0x890 kernel/workqueue.c:2269
+    worker_thread+0xa0/0x800 kernel/workqueue.c:2415
+    kthread+0x1d4/0x200 drivers/block/aoe/aoecmd.c:1253
+    ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
+
+It was caused by rhashtable access asoc->base.sk when sctp_assoc_migrate
+is changing its value. However, what rhashtable wants is netns from asoc
+base.sk, and for an asoc, its netns won't change once set. So we can
+simply fix it by caching netns since created.
+
+Fixes: d6c0256a60e6 ("sctp: add the rhashtable apis for sctp global transport hashtable")
+Reported-by: syzbot+e3b35fe7918ff0ee474e@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/structs.h |    3 +++
+ net/sctp/associola.c       |    1 +
+ net/sctp/endpointola.c     |    1 +
+ net/sctp/input.c           |    4 ++--
+ 4 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1239,6 +1239,9 @@ struct sctp_ep_common {
+       /* What socket does this endpoint belong to?  */
+       struct sock *sk;
++      /* Cache netns and it won't change once set */
++      struct net *net;
++
+       /* This is where we receive inbound chunks.  */
+       struct sctp_inq   inqueue;
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -64,6 +64,7 @@ static struct sctp_association *sctp_ass
+       /* Discarding const is appropriate here.  */
+       asoc->ep = (struct sctp_endpoint *)ep;
+       asoc->base.sk = (struct sock *)sk;
++      asoc->base.net = sock_net(sk);
+       sctp_endpoint_hold(asoc->ep);
+       sock_hold(asoc->base.sk);
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -110,6 +110,7 @@ static struct sctp_endpoint *sctp_endpoi
+       /* Remember who we are attached to.  */
+       ep->base.sk = sk;
++      ep->base.net = sock_net(sk);
+       sock_hold(ep->base.sk);
+       return ep;
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -882,7 +882,7 @@ static inline int sctp_hash_cmp(struct r
+       if (!sctp_transport_hold(t))
+               return err;
+-      if (!net_eq(sock_net(t->asoc->base.sk), x->net))
++      if (!net_eq(t->asoc->base.net, x->net))
+               goto out;
+       if (x->lport != htons(t->asoc->base.bind_addr.port))
+               goto out;
+@@ -897,7 +897,7 @@ static inline __u32 sctp_hash_obj(const
+ {
+       const struct sctp_transport *t = data;
+-      return sctp_hashfn(sock_net(t->asoc->base.sk),
++      return sctp_hashfn(t->asoc->base.net,
+                          htons(t->asoc->base.bind_addr.port),
+                          &t->ipaddr, seed);
+ }
diff --git a/queue-5.4/sctp-fix-memory-leak-in-sctp_sf_do_5_2_4_dupcook.patch b/queue-5.4/sctp-fix-memory-leak-in-sctp_sf_do_5_2_4_dupcook.patch
new file mode 100644 (file)
index 0000000..a410417
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 22 Nov 2019 16:17:56 -0600
+Subject: sctp: Fix memory leak in sctp_sf_do_5_2_4_dupcook
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+[ Upstream commit b6631c6031c746ed004c4221ec0616d7a520f441 ]
+
+In the implementation of sctp_sf_do_5_2_4_dupcook() the allocated
+new_asoc is leaked if security_sctp_assoc_request() fails. Release it
+via sctp_association_free().
+
+Fixes: 2277c7cd75e3 ("sctp: Add LSM hooks")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sm_statefuns.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2160,8 +2160,10 @@ enum sctp_disposition sctp_sf_do_5_2_4_d
+       /* Update socket peer label if first association. */
+       if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
+-                                      chunk->skb))
++                                      chunk->skb)) {
++              sctp_association_free(new_asoc);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++      }
+       /* Set temp so that it won't be added into hashtable */
+       new_asoc->temp = 1;
diff --git a/queue-5.4/selftests-bpf-correct-perror-strings.patch b/queue-5.4/selftests-bpf-correct-perror-strings.patch
new file mode 100644 (file)
index 0000000..b46a9b7
--- /dev/null
@@ -0,0 +1,202 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:46 -0800
+Subject: selftests: bpf: correct perror strings
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit e5dc9dd3258098bf8b5ceb75fc3433b41eff618a ]
+
+perror(str) is basically equivalent to
+print("%s: %s\n", str, strerror(errno)).
+New line or colon at the end of str is
+a mistake/breaks formatting.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_sockmap.c |   38 ++++++++++++++---------------
+ tools/testing/selftests/bpf/xdping.c       |    2 -
+ 2 files changed, 20 insertions(+), 20 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -240,14 +240,14 @@ static int sockmap_init_sockets(int verb
+       addr.sin_port = htons(S1_PORT);
+       err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
+       if (err < 0) {
+-              perror("bind s1 failed()\n");
++              perror("bind s1 failed()");
+               return errno;
+       }
+       addr.sin_port = htons(S2_PORT);
+       err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
+       if (err < 0) {
+-              perror("bind s2 failed()\n");
++              perror("bind s2 failed()");
+               return errno;
+       }
+@@ -255,14 +255,14 @@ static int sockmap_init_sockets(int verb
+       addr.sin_port = htons(S1_PORT);
+       err = listen(s1, 32);
+       if (err < 0) {
+-              perror("listen s1 failed()\n");
++              perror("listen s1 failed()");
+               return errno;
+       }
+       addr.sin_port = htons(S2_PORT);
+       err = listen(s2, 32);
+       if (err < 0) {
+-              perror("listen s1 failed()\n");
++              perror("listen s1 failed()");
+               return errno;
+       }
+@@ -270,14 +270,14 @@ static int sockmap_init_sockets(int verb
+       addr.sin_port = htons(S1_PORT);
+       err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
+       if (err < 0 && errno != EINPROGRESS) {
+-              perror("connect c1 failed()\n");
++              perror("connect c1 failed()");
+               return errno;
+       }
+       addr.sin_port = htons(S2_PORT);
+       err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
+       if (err < 0 && errno != EINPROGRESS) {
+-              perror("connect c2 failed()\n");
++              perror("connect c2 failed()");
+               return errno;
+       } else if (err < 0) {
+               err = 0;
+@@ -286,13 +286,13 @@ static int sockmap_init_sockets(int verb
+       /* Accept Connecrtions */
+       p1 = accept(s1, NULL, NULL);
+       if (p1 < 0) {
+-              perror("accept s1 failed()\n");
++              perror("accept s1 failed()");
+               return errno;
+       }
+       p2 = accept(s2, NULL, NULL);
+       if (p2 < 0) {
+-              perror("accept s1 failed()\n");
++              perror("accept s1 failed()");
+               return errno;
+       }
+@@ -353,7 +353,7 @@ static int msg_loop_sendpage(int fd, int
+               int sent = sendfile(fd, fp, NULL, iov_length);
+               if (!drop && sent < 0) {
+-                      perror("send loop error:");
++                      perror("send loop error");
+                       close(fp);
+                       return sent;
+               } else if (drop && sent >= 0) {
+@@ -472,7 +472,7 @@ static int msg_loop(int fd, int iov_coun
+                       int sent = sendmsg(fd, &msg, flags);
+                       if (!drop && sent < 0) {
+-                              perror("send loop error:");
++                              perror("send loop error");
+                               goto out_errno;
+                       } else if (drop && sent >= 0) {
+                               printf("send loop error expected: %i\n", sent);
+@@ -508,7 +508,7 @@ static int msg_loop(int fd, int iov_coun
+               total_bytes -= txmsg_pop_total;
+               err = clock_gettime(CLOCK_MONOTONIC, &s->start);
+               if (err < 0)
+-                      perror("recv start time: ");
++                      perror("recv start time");
+               while (s->bytes_recvd < total_bytes) {
+                       if (txmsg_cork) {
+                               timeout.tv_sec = 0;
+@@ -552,7 +552,7 @@ static int msg_loop(int fd, int iov_coun
+                       if (recv < 0) {
+                               if (errno != EWOULDBLOCK) {
+                                       clock_gettime(CLOCK_MONOTONIC, &s->end);
+-                                      perror("recv failed()\n");
++                                      perror("recv failed()");
+                                       goto out_errno;
+                               }
+                       }
+@@ -566,7 +566,7 @@ static int msg_loop(int fd, int iov_coun
+                               errno = msg_verify_data(&msg, recv, chunk_sz);
+                               if (errno) {
+-                                      perror("data verify msg failed\n");
++                                      perror("data verify msg failed");
+                                       goto out_errno;
+                               }
+                               if (recvp) {
+@@ -574,7 +574,7 @@ static int msg_loop(int fd, int iov_coun
+                                                               recvp,
+                                                               chunk_sz);
+                                       if (errno) {
+-                                              perror("data verify msg_peek failed\n");
++                                              perror("data verify msg_peek failed");
+                                               goto out_errno;
+                                       }
+                               }
+@@ -663,7 +663,7 @@ static int sendmsg_test(struct sockmap_o
+                       err = 0;
+               exit(err ? 1 : 0);
+       } else if (rxpid == -1) {
+-              perror("msg_loop_rx: ");
++              perror("msg_loop_rx");
+               return errno;
+       }
+@@ -690,7 +690,7 @@ static int sendmsg_test(struct sockmap_o
+                               s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+               exit(err ? 1 : 0);
+       } else if (txpid == -1) {
+-              perror("msg_loop_tx: ");
++              perror("msg_loop_tx");
+               return errno;
+       }
+@@ -724,7 +724,7 @@ static int forever_ping_pong(int rate, s
+       /* Ping/Pong data from client to server */
+       sc = send(c1, buf, sizeof(buf), 0);
+       if (sc < 0) {
+-              perror("send failed()\n");
++              perror("send failed()");
+               return sc;
+       }
+@@ -757,7 +757,7 @@ static int forever_ping_pong(int rate, s
+                       rc = recv(i, buf, sizeof(buf), 0);
+                       if (rc < 0) {
+                               if (errno != EWOULDBLOCK) {
+-                                      perror("recv failed()\n");
++                                      perror("recv failed()");
+                                       return rc;
+                               }
+                       }
+@@ -769,7 +769,7 @@ static int forever_ping_pong(int rate, s
+                       sc = send(i, buf, rc, 0);
+                       if (sc < 0) {
+-                              perror("send failed()\n");
++                              perror("send failed()");
+                               return sc;
+                       }
+               }
+--- a/tools/testing/selftests/bpf/xdping.c
++++ b/tools/testing/selftests/bpf/xdping.c
+@@ -45,7 +45,7 @@ static int get_stats(int fd, __u16 count
+       printf("\nXDP RTT data:\n");
+       if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) {
+-              perror("bpf_map_lookup elem: ");
++              perror("bpf_map_lookup elem");
+               return 1;
+       }
diff --git a/queue-5.4/selftests-bpf-test_sockmap-handle-file-creation-failures-gracefully.patch b/queue-5.4/selftests-bpf-test_sockmap-handle-file-creation-failures-gracefully.patch
new file mode 100644 (file)
index 0000000..ee2fbfb
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:45 -0800
+Subject: selftests: bpf: test_sockmap: handle file creation failures gracefully
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 4b67c515036313f3c3ecba3cb2babb9cbddb3f85 ]
+
+test_sockmap creates a temporary file to use for sendpage.
+This may fail for various reasons. Handle the error rather
+than segfault.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_sockmap.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -332,6 +332,10 @@ static int msg_loop_sendpage(int fd, int
+       int i, fp;
+       file = fopen(".sendpage_tst.tmp", "w+");
++      if (!file) {
++              perror("create file for sendpage");
++              return 1;
++      }
+       for (i = 0; i < iov_length * cnt; i++, k++)
+               fwrite(&k, sizeof(char), 1, file);
+       fflush(file);
+@@ -339,6 +343,11 @@ static int msg_loop_sendpage(int fd, int
+       fclose(file);
+       fp = open(".sendpage_tst.tmp", O_RDONLY);
++      if (fp < 0) {
++              perror("reopen file for sendpage");
++              return 1;
++      }
++
+       clock_gettime(CLOCK_MONOTONIC, &s->start);
+       for (i = 0; i < cnt; i++) {
+               int sent = sendfile(fd, fp, NULL, iov_length);
diff --git a/queue-5.4/selftests-pmtu-use-oneline-for-ip-route-list-cache.patch b/queue-5.4/selftests-pmtu-use-oneline-for-ip-route-list-cache.patch
new file mode 100644 (file)
index 0000000..64c73b5
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Date: Thu, 28 Nov 2019 15:58:06 -0300
+Subject: selftests: pmtu: use -oneline for ip route list cache
+
+From: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+
+[ Upstream commit 2745aea6750ff0d2c48285d25bdb00e5b636ec8b ]
+
+Some versions of iproute2 will output more than one line per entry, which
+will cause the test to fail, like:
+
+TEST: ipv6: list and flush cached exceptions                        [FAIL]
+  can't list cached exceptions
+
+That happens, for example, with iproute2 4.15.0. When using the -oneline
+option, this will work just fine:
+
+TEST: ipv6: list and flush cached exceptions                        [ OK ]
+
+This also works just fine with a more recent version of iproute2, like
+5.4.0.
+
+For some reason, two lines are printed for the IPv4 test no matter what
+version of iproute2 is used. Use the same -oneline parameter there instead
+of counting the lines twice.
+
+Fixes: b964641e9925 ("selftests: pmtu: Make list_flush_ipv6_exception test more demanding")
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Acked-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/pmtu.sh |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -1249,8 +1249,7 @@ test_list_flush_ipv4_exception() {
+       done
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}"
+-      # Each exception is printed as two lines
+-      if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then
++      if [ "$(${ns_a} ip -oneline route list cache | wc -l)" -ne 101 ]; then
+               err "  can't list cached exceptions"
+               fail=1
+       fi
+@@ -1300,7 +1299,7 @@ test_list_flush_ipv6_exception() {
+               run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst_prefix1}${i}"
+       done
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst2}"
+-      if [ "$(${ns_a} ip -6 route list cache | wc -l)" -ne 101 ]; then
++      if [ "$(${ns_a} ip -oneline -6 route list cache | wc -l)" -ne 101 ]; then
+               err "  can't list cached exceptions"
+               fail=1
+       fi
diff --git a/queue-5.4/selftests-tls-add-a-test-for-fragmented-messages.patch b/queue-5.4/selftests-tls-add-a-test-for-fragmented-messages.patch
new file mode 100644 (file)
index 0000000..b313e1c
--- /dev/null
@@ -0,0 +1,96 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Wed, 27 Nov 2019 12:16:42 -0800
+Subject: selftests/tls: add a test for fragmented messages
+
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+
+[ Upstream commit 65190f77424d7b82c4aad7326c9cce6bd91a2fcc ]
+
+Add a sendmsg test with very fragmented messages. This should
+fill up sk_msg and test the boundary conditions.
+
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/tls.c |   60 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -268,6 +268,38 @@ TEST_F(tls, sendmsg_single)
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+ }
++#define MAX_FRAGS     64
++#define SEND_LEN      13
++TEST_F(tls, sendmsg_fragmented)
++{
++      char const *test_str = "test_sendmsg";
++      char buf[SEND_LEN * MAX_FRAGS];
++      struct iovec vec[MAX_FRAGS];
++      struct msghdr msg;
++      int i, frags;
++
++      for (frags = 1; frags <= MAX_FRAGS; frags++) {
++              for (i = 0; i < frags; i++) {
++                      vec[i].iov_base = (char *)test_str;
++                      vec[i].iov_len = SEND_LEN;
++              }
++
++              memset(&msg, 0, sizeof(struct msghdr));
++              msg.msg_iov = vec;
++              msg.msg_iovlen = frags;
++
++              EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags);
++              EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL),
++                        SEND_LEN * frags);
++
++              for (i = 0; i < frags; i++)
++                      EXPECT_EQ(memcmp(buf + SEND_LEN * i,
++                                       test_str, SEND_LEN), 0);
++      }
++}
++#undef MAX_FRAGS
++#undef SEND_LEN
++
+ TEST_F(tls, sendmsg_large)
+ {
+       void *mem = malloc(16384);
+@@ -694,6 +726,34 @@ TEST_F(tls, recv_lowat)
+       EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
+ }
++TEST_F(tls, recv_rcvbuf)
++{
++      char send_mem[4096];
++      char recv_mem[4096];
++      int rcv_buf = 1024;
++
++      memset(send_mem, 0x1c, sizeof(send_mem));
++
++      EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF,
++                           &rcv_buf, sizeof(rcv_buf)), 0);
++
++      EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512);
++      memset(recv_mem, 0, sizeof(recv_mem));
++      EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512);
++      EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0);
++
++      if (self->notls)
++              return;
++
++      EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096);
++      memset(recv_mem, 0, sizeof(recv_mem));
++      EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
++      EXPECT_EQ(errno, EMSGSIZE);
++
++      EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
++      EXPECT_EQ(errno, EMSGSIZE);
++}
++
+ TEST_F(tls, bidir)
+ {
+       char const *test_str = "test_read";
index c0437dc00d04e813f2e85b979dbf05516faccaa2..91087085e3b427ca3c026f78e063600f74c1325f 100644 (file)
@@ -12,3 +12,29 @@ usb-serial-ftdi_sio-add-device-ids-for-u-blox-c099-f9p.patch
 mei-bus-prefix-device-names-on-bus-with-the-bus-name.patch
 mei-me-add-comet-point-v-device-id.patch
 thunderbolt-power-cycle-the-router-if-nvm-authentication-fails.patch
+x86-fpu-don-t-cache-access-to-fpu_fpregs_owner_ctx.patch
+gve-fix-the-queue-page-list-allocated-pages-count.patch
+macvlan-schedule-bc_work-even-if-error.patch
+mdio_bus-don-t-use-managed-reset-controller.patch
+net-dsa-sja1105-fix-sja1105_parse_rgmii_delays.patch
+net-macb-add-missed-tasklet_kill.patch
+net-psample-fix-skb_over_panic.patch
+net-sched-fix-tc-s-class-show-no-bstats-on-class-with-nolock-subqueues.patch
+openvswitch-fix-flow-command-message-size.patch
+sctp-fix-memory-leak-in-sctp_sf_do_5_2_4_dupcook.patch
+slip-fix-use-after-free-read-in-slip_open.patch
+sctp-cache-netns-in-sctp_ep_common.patch
+openvswitch-drop-unneeded-bug_on-in-ovs_flow_cmd_build_info.patch
+openvswitch-remove-another-bug_on.patch
+net-tls-take-into-account-that-bpf_exec_tx_verdict-may-free-the-record.patch
+net-tls-free-the-record-on-encryption-error.patch
+net-skmsg-fix-tls-1.3-crash-with-full-sk_msg.patch
+selftests-tls-add-a-test-for-fragmented-messages.patch
+net-tls-remove-the-dead-inplace_crypto-code.patch
+net-tls-use-sg_next-to-walk-sg-entries.patch
+selftests-bpf-test_sockmap-handle-file-creation-failures-gracefully.patch
+selftests-bpf-correct-perror-strings.patch
+tipc-fix-link-name-length-check.patch
+selftests-pmtu-use-oneline-for-ip-route-list-cache.patch
+r8169-fix-jumbo-configuration-for-rtl8168evl.patch
+r8169-fix-resume-on-cable-plug-in.patch
diff --git a/queue-5.4/slip-fix-use-after-free-read-in-slip_open.patch b/queue-5.4/slip-fix-use-after-free-read-in-slip_open.patch
new file mode 100644 (file)
index 0000000..41ab46e
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: Jouni Hogander <jouni.hogander@unikie.com>
+Date: Mon, 25 Nov 2019 14:23:43 +0200
+Subject: slip: Fix use-after-free Read in slip_open
+
+From: Jouni Hogander <jouni.hogander@unikie.com>
+
+[ Upstream commit e58c1912418980f57ba2060017583067f5f71e52 ]
+
+Slip_open doesn't clean-up device which registration failed from the
+slip_devs device list. On next open after failure this list is iterated
+and freed device is accessed. Fix this by calling sl_free_netdev in error
+path.
+
+Here is the trace from the Syzbot:
+
+__dump_stack lib/dump_stack.c:77 [inline]
+dump_stack+0x197/0x210 lib/dump_stack.c:118
+print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374
+__kasan_report.cold+0x1b/0x41 mm/kasan/report.c:506
+kasan_report+0x12/0x20 mm/kasan/common.c:634
+__asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:132
+sl_sync drivers/net/slip/slip.c:725 [inline]
+slip_open+0xecd/0x11b7 drivers/net/slip/slip.c:801
+tty_ldisc_open.isra.0+0xa3/0x110 drivers/tty/tty_ldisc.c:469
+tty_set_ldisc+0x30e/0x6b0 drivers/tty/tty_ldisc.c:596
+tiocsetd drivers/tty/tty_io.c:2334 [inline]
+tty_ioctl+0xe8d/0x14f0 drivers/tty/tty_io.c:2594
+vfs_ioctl fs/ioctl.c:46 [inline]
+file_ioctl fs/ioctl.c:509 [inline]
+do_vfs_ioctl+0xdb6/0x13e0 fs/ioctl.c:696
+ksys_ioctl+0xab/0xd0 fs/ioctl.c:713
+__do_sys_ioctl fs/ioctl.c:720 [inline]
+__se_sys_ioctl fs/ioctl.c:718 [inline]
+__x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:718
+do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
+entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Fixes: 3b5a39979daf ("slip: Fix memory leak in slip_open error path")
+Reported-by: syzbot+4d5170758f3762109542@syzkaller.appspotmail.com
+Cc: David Miller <davem@davemloft.net>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Signed-off-by: Jouni Hogander <jouni.hogander@unikie.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/slip/slip.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -855,6 +855,7 @@ err_free_chan:
+       sl->tty = NULL;
+       tty->disc_data = NULL;
+       clear_bit(SLF_INUSE, &sl->flags);
++      sl_free_netdev(sl->dev);
+       free_netdev(sl->dev);
+ err_exit:
diff --git a/queue-5.4/tipc-fix-link-name-length-check.patch b/queue-5.4/tipc-fix-link-name-length-check.patch
new file mode 100644 (file)
index 0000000..ec8f441
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Tue 03 Dec 2019 07:38:48 AM CET
+From: John Rutherford <john.rutherford@dektech.com.au>
+Date: Tue, 26 Nov 2019 13:52:55 +1100
+Subject: tipc: fix link name length check
+
+From: John Rutherford <john.rutherford@dektech.com.au>
+
+[ Upstream commit fd567ac20cb0377ff466d3337e6e9ac5d0cb15e4 ]
+
+In commit 4f07b80c9733 ("tipc: check msg->req data len in
+tipc_nl_compat_bearer_disable") the same patch code was copied into
+routines: tipc_nl_compat_bearer_disable(),
+tipc_nl_compat_link_stat_dump() and tipc_nl_compat_link_reset_stats().
+The two link routine occurrences should have been modified to check
+the maximum link name length and not bearer name length.
+
+Fixes: 4f07b80c9733 ("tipc: check msg->req data len in tipc_nl_compat_bearer_disable")
+Signed-off-by: John Rutherford <john.rutherford@dektech.com.au>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/netlink_compat.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -550,7 +550,7 @@ static int tipc_nl_compat_link_stat_dump
+       if (len <= 0)
+               return -EINVAL;
+-      len = min_t(int, len, TIPC_MAX_BEARER_NAME);
++      len = min_t(int, len, TIPC_MAX_LINK_NAME);
+       if (!string_is_valid(name, len))
+               return -EINVAL;
+@@ -822,7 +822,7 @@ static int tipc_nl_compat_link_reset_sta
+       if (len <= 0)
+               return -EINVAL;
+-      len = min_t(int, len, TIPC_MAX_BEARER_NAME);
++      len = min_t(int, len, TIPC_MAX_LINK_NAME);
+       if (!string_is_valid(name, len))
+               return -EINVAL;
diff --git a/queue-5.4/x86-fpu-don-t-cache-access-to-fpu_fpregs_owner_ctx.patch b/queue-5.4/x86-fpu-don-t-cache-access-to-fpu_fpregs_owner_ctx.patch
new file mode 100644 (file)
index 0000000..65f1211
--- /dev/null
@@ -0,0 +1,113 @@
+From 59c4bd853abcea95eccc167a7d7fd5f1a5f47b98 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Nov 2019 09:53:06 +0100
+Subject: x86/fpu: Don't cache access to fpu_fpregs_owner_ctx
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 59c4bd853abcea95eccc167a7d7fd5f1a5f47b98 upstream.
+
+The state/owner of the FPU is saved to fpu_fpregs_owner_ctx by pointing
+to the context that is currently loaded. It never changed during the
+lifetime of a task - it remained stable/constant.
+
+After deferred FPU registers loading until return to userland was
+implemented, the content of fpu_fpregs_owner_ctx may change during
+preemption and must not be cached.
+
+This went unnoticed for some time and was now noticed, in particular
+since gcc 9 is caching that load in copy_fpstate_to_sigframe() and
+reusing it in the retry loop:
+
+  copy_fpstate_to_sigframe()
+    load fpu_fpregs_owner_ctx and save on stack
+    fpregs_lock()
+    copy_fpregs_to_sigframe() /* failed */
+    fpregs_unlock()
+         *** PREEMPTION, another uses FPU, changes fpu_fpregs_owner_ctx ***
+
+    fault_in_pages_writeable() /* succeed, retry */
+
+    fpregs_lock()
+       __fpregs_load_activate()
+         fpregs_state_valid() /* uses fpu_fpregs_owner_ctx from stack */
+    copy_fpregs_to_sigframe() /* succeeds, random FPU content */
+
+This is a comparison of the assembly produced by gcc 9, without vs with this
+patch:
+
+| # arch/x86/kernel/fpu/signal.c:173:      if (!access_ok(buf, size))
+|        cmpq    %rdx, %rax      # tmp183, _4
+|        jb      .L190   #,
+|-# arch/x86/include/asm/fpu/internal.h:512:       return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+|-#APP
+|-# 512 "arch/x86/include/asm/fpu/internal.h" 1
+|-       movq %gs:fpu_fpregs_owner_ctx,%rax      #, pfo_ret__
+|-# 0 "" 2
+|-#NO_APP
+|-       movq    %rax, -88(%rbp) # pfo_ret__, %sfp
+…
+|-# arch/x86/include/asm/fpu/internal.h:512:       return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+|-       movq    -88(%rbp), %rcx # %sfp, pfo_ret__
+|-       cmpq    %rcx, -64(%rbp) # pfo_ret__, %sfp
+|+# arch/x86/include/asm/fpu/internal.h:512:       return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+|+#APP
+|+# 512 "arch/x86/include/asm/fpu/internal.h" 1
+|+       movq %gs:fpu_fpregs_owner_ctx(%rip),%rax        # fpu_fpregs_owner_ctx, pfo_ret__
+|+# 0 "" 2
+|+# arch/x86/include/asm/fpu/internal.h:512:       return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+|+#NO_APP
+|+       cmpq    %rax, -64(%rbp) # pfo_ret__, %sfp
+
+Use this_cpu_read() instead this_cpu_read_stable() to avoid caching of
+fpu_fpregs_owner_ctx during preemption points.
+
+The Fixes: tag points to the commit where deferred FPU loading was
+added. Since this commit, the compiler is no longer allowed to move the
+load of fpu_fpregs_owner_ctx somewhere else / outside of the locked
+section. A task preemption will change its value and stale content will
+be observed.
+
+ [ bp: Massage. ]
+
+Debugged-by: Austin Clements <austin@google.com>
+Debugged-by: David Chase <drchase@golang.org>
+Debugged-by: Ian Lance Taylor <ian@airs.com>
+Fixes: 5f409e20b7945 ("x86/fpu: Defer FPU state load until return to userspace")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Aubrey Li <aubrey.li@intel.com>
+Cc: Austin Clements <austin@google.com>
+Cc: Barret Rhoden <brho@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: David Chase <drchase@golang.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: ian@airs.com
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Josh Bleecher Snyder <josharian@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20191128085306.hxfa2o3knqtu4wfn@linutronix.de
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=205663
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/fpu/internal.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpre
+ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
+ {
+-      return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
++      return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+ }
+ /*