From: Sasha Levin
Date: Mon, 21 Dec 2020 21:02:44 +0000 (-0500)
Subject: Fixes for 4.19
X-Git-Tag: v5.10.3~26
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f464cd8fef023659bf809ccf768b11d6a5b0ac04;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 4.19

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.19/arm-dts-sun8i-v3s-fix-gic-node-memory-range.patch b/queue-4.19/arm-dts-sun8i-v3s-fix-gic-node-memory-range.patch
new file mode 100644
index 00000000000..5320d533d4d
--- /dev/null
+++ b/queue-4.19/arm-dts-sun8i-v3s-fix-gic-node-memory-range.patch
@@ -0,0 +1,39 @@
+From e4798c25fe2518417f32191e1a4f12aa89215bfb Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 20 Nov 2020 13:08:51 +0800
+Subject: ARM: dts: sun8i: v3s: fix GIC node memory range
+
+From: Icenowy Zheng
+
+[ Upstream commit a98fd117a2553ab1a6d2fe3c7acae88c1eca4372 ]
+
+Currently the GIC node in the V3s DTSI follows some old DT examples and
+is broken. This leads to a warning at boot.
+
+Fix this.
+
+Fixes: f989086ccbc6 ("ARM: dts: sunxi: add dtsi file for V3s SoC")
+Signed-off-by: Icenowy Zheng
+Signed-off-by: Maxime Ripard
+Link: https://lore.kernel.org/r/20201120050851.4123759-1-icenowy@aosc.io
+Signed-off-by: Sasha Levin
+---
+ arch/arm/boot/dts/sun8i-v3s.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
+index 92fcb756a08a9..97cac6d636923 100644
+--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
++++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
+@@ -419,7 +419,7 @@
+ 		gic: interrupt-controller@1c81000 {
+ 			compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ 			reg = <0x01c81000 0x1000>,
+-			      <0x01c82000 0x1000>,
++			      <0x01c82000 0x2000>,
+ 			      <0x01c84000 0x2000>,
+ 			      <0x01c86000 0x2000>;
+ 			interrupt-controller;
+--
+2.27.0
+
diff --git a/queue-4.19/arm64-syscall-exit-userspace-before-unmasking-except.patch b/queue-4.19/arm64-syscall-exit-userspace-before-unmasking-except.patch
new file mode 100644
index 00000000000..feda09a2221
--- /dev/null
+++ b/queue-4.19/arm64-syscall-exit-userspace-before-unmasking-except.patch
@@ -0,0 +1,53 @@
+From fb5a51c2b527dc4c732cdfbba3d1dfc5020b273c Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 30 Nov 2020 11:59:40 +0000
+Subject: arm64: syscall: exit userspace before unmasking exceptions
+
+From: Mark Rutland
+
+[ Upstream commit ca1314d73eed493c49bb1932c60a8605530db2e4 ]
+
+In el0_svc_common() we unmask exceptions before we call user_exit(), and
+so there's a window where an IRQ or debug exception can be taken while
+RCU is not watching. In do_debug_exception() we account for this via
+debug_exception_{enter,exit}(), but in the el1_irq asm we do not and we
+call trace functions which rely on RCU before we have a guarantee that
+RCU is watching.
+
+Let's avoid this by having el0_svc_common() exit userspace before
+unmasking exceptions, matching what we do for all other EL0 entry paths.
+We can use user_exit_irqoff() to avoid the pointless save/restore of IRQ
+flags while we're sure exceptions are masked in DAIF.
+
+The workaround for Cortex-A76 erratum 1463225 may trigger a debug
+exception before this point, but the debug code invoked in this case is
+safe even when RCU is not watching.
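+
+In sketch form, the reordering in el0_svc_common() (simplified; the
+exact change is in the hunk below) is:
+
+	/* before: exceptions are unmasked while RCU may not be watching */
+	local_daif_restore(DAIF_PROCCTX);
+	user_exit();
+
+	/* after: exit userspace first, while exceptions are still masked */
+	user_exit_irqoff();
+	local_daif_restore(DAIF_PROCCTX);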
+
+Signed-off-by: Mark Rutland
+Cc: Catalin Marinas
+Cc: James Morse
+Cc: Will Deacon
+Link: https://lore.kernel.org/r/20201130115950.22492-2-mark.rutland@arm.com
+Signed-off-by: Will Deacon
+Signed-off-by: Sasha Levin
+---
+ arch/arm64/kernel/syscall.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 1457a0ba83dbc..f2d2dbbbfca20 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -102,8 +102,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ 	regs->syscallno = scno;
+ 
+ 	cortex_a76_erratum_1463225_svc_handler();
++	user_exit_irqoff();
+ 	local_daif_restore(DAIF_PROCCTX);
+-	user_exit();
+ 
+ 	if (has_syscall_work(flags)) {
+ 		/* set default errno for user-issued syscall(-1) */
+--
+2.27.0
+
diff --git a/queue-4.19/block-factor-out-requeue-handling-from-dispatch-code.patch b/queue-4.19/block-factor-out-requeue-handling-from-dispatch-code.patch
new file mode 100644
index 00000000000..9862eeecee8
--- /dev/null
+++ b/queue-4.19/block-factor-out-requeue-handling-from-dispatch-code.patch
@@ -0,0 +1,70 @@
+From cc4123a79f73a7565db9b9f9100eefe5ec6914b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 25 Mar 2020 00:24:44 +0900
+Subject: block: factor out requeue handling from dispatch code
+
+From: Johannes Thumshirn
+
+[ Upstream commit c92a41031a6d57395889b5c87cea359220a24d2a ]
+
+Factor out the requeue handling from the dispatch code; this will make
+subsequent addition of different requeueing schemes easier.
+
+Signed-off-by: Johannes Thumshirn
+Reviewed-by: Christoph Hellwig
+Signed-off-by: Jens Axboe
+Signed-off-by: Sasha Levin
+---
+ block/blk-mq.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index db2db0b70d34f..0df43515ff949 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1118,6 +1118,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
+ 
+ #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
+ 
++static void blk_mq_handle_dev_resource(struct request *rq,
++				       struct list_head *list)
++{
++	struct request *next =
++		list_first_entry_or_null(list, struct request, queuelist);
++
++	/*
++	 * If an I/O scheduler has been configured and we got a driver tag for
++	 * the next request already, free it.
++	 */
++	if (next)
++		blk_mq_put_driver_tag(next);
++
++	list_add(&rq->queuelist, list);
++	__blk_mq_requeue_request(rq);
++}
++
+ /*
+  * Returns true if we did some work AND can potentially do more.
+  */
+@@ -1185,17 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ 
+ 		ret = q->mq_ops->queue_rq(hctx, &bd);
+ 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+-			/*
+-			 * If an I/O scheduler has been configured and we got a
+-			 * driver tag for the next request already, free it
+-			 * again.
+-			 */
+-			if (!list_empty(list)) {
+-				nxt = list_first_entry(list, struct request, queuelist);
+-				blk_mq_put_driver_tag(nxt);
+-			}
+-			list_add(&rq->queuelist, list);
+-			__blk_mq_requeue_request(rq);
++			blk_mq_handle_dev_resource(rq, list);
+ 			break;
+ 		}
+ 
+--
+2.27.0
+
diff --git a/queue-4.19/can-softing-softing_netdev_open-fix-error-handling.patch b/queue-4.19/can-softing-softing_netdev_open-fix-error-handling.patch
new file mode 100644
index 00000000000..16ffb84ec88
--- /dev/null
+++ b/queue-4.19/can-softing-softing_netdev_open-fix-error-handling.patch
@@ -0,0 +1,47 @@
+From d93eaa01c3a9a29342bdccb1f32033089a00b321 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 4 Dec 2020 14:35:06 +0100
+Subject: can: softing: softing_netdev_open(): fix error handling
+
+From: Zhang Qilong
+
+[ Upstream commit 4d1be581ec6b92a338bb7ed23e1381f45ddf336f ]
+
+If softing_netdev_open() fails, we should call close_candev() to avoid
+a reference leak.
+
+Fixes: 03fd3cf5a179d ("can: add driver for Softing card")
+Signed-off-by: Zhang Qilong
+Acked-by: Kurt Van Dijck
+Link: https://lore.kernel.org/r/20201202151632.1343786-1-zhangqilong3@huawei.com
+Signed-off-by: Marc Kleine-Budde
+Link: https://lore.kernel.org/r/20201204133508.742120-2-mkl@pengutronix.de
+Signed-off-by: Jakub Kicinski
+Signed-off-by: Sasha Levin
+---
+ drivers/net/can/softing/softing_main.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
+index e226961905830..bed5ffa75b276 100644
+--- a/drivers/net/can/softing/softing_main.c
++++ b/drivers/net/can/softing/softing_main.c
+@@ -393,8 +393,13 @@ static int softing_netdev_open(struct net_device *ndev)
+ 
+ 	/* check or determine and set bittime */
+ 	ret = open_candev(ndev);
+-	if (!ret)
+-		ret = softing_startstop(ndev, 1);
++	if (ret)
++		return ret;
++
++	ret = softing_startstop(ndev, 1);
++	if (ret < 0)
++		close_candev(ndev);
++
+ 	return ret;
+ }
+ 
+--
+2.27.0
+
diff --git a/queue-4.19/clk-renesas-r9a06g032-drop-__packed-for-portability.patch b/queue-4.19/clk-renesas-r9a06g032-drop-__packed-for-portability.patch
new file mode 100644
index 00000000000..45b44a7e082
--- /dev/null
+++ b/queue-4.19/clk-renesas-r9a06g032-drop-__packed-for-portability.patch
@@ -0,0 +1,55 @@
+From f6202f7b3e5724c039e4a29339d466a9da9a9496 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 30 Nov 2020 09:57:43 +0100
+Subject: clk: renesas: r9a06g032: Drop __packed for portability
+
+From: Geert Uytterhoeven
+
+[ Upstream commit ceabbf94c317c6175dee6e91805fca4a6353745a ]
+
+The R9A06G032 clock driver uses an array of packed structures to reduce
+kernel size. However, this array contains pointers, which are no longer
+aligned naturally, and cannot be relocated on PPC64. Hence when
+compile-testing this driver on PPC64 with CONFIG_RELOCATABLE=y (e.g.
+PowerPC allyesconfig), the following warnings are produced:
+
+  WARNING: 136 bad relocations
+  c000000000616be3 R_PPC64_UADDR64 .rodata+0x00000000000cf338
+  c000000000616bfe R_PPC64_UADDR64 .rodata+0x00000000000cf370
+  ...
+
+Fix this by dropping the __packed attribute from the r9a06g032_clkdesc
+definition, trading a small size increase for portability.
+
+This increases the 156-entry clock table by 1 byte per entry, but due to
+the compiler generating more efficient code for unpacked accesses, the
+net size increase is only 76 bytes (gcc 9.3.0 on arm32).
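+
+As a minimal illustration of the problem (a hypothetical struct, not
+taken from the driver):
+
+	struct demo {
+		u8 type;
+		const char *name;	/* lands at offset 1 when packed */
+	} __packed;
+
+A pointer member at an unaligned offset needs an R_PPC64_UADDR64
+relocation, which the relocatable PPC64 kernel does not handle, hence
+the "bad relocations" warnings above.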
+
+Reported-by: Stephen Rothwell
+Fixes: 4c3d88526eba2143 ("clk: renesas: Renesas R9A06G032 clock driver")
+Signed-off-by: Geert Uytterhoeven
+Link: https://lore.kernel.org/r/20201130085743.1656317-1-geert+renesas@glider.be
+Tested-by: Stephen Rothwell # PowerPC allyesconfig build
+Acked-by: Stephen Boyd
+Signed-off-by: Stephen Boyd
+Signed-off-by: Sasha Levin
+---
+ drivers/clk/renesas/r9a06g032-clocks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
+index 6d2b568915597..6e03b467395b2 100644
+--- a/drivers/clk/renesas/r9a06g032-clocks.c
++++ b/drivers/clk/renesas/r9a06g032-clocks.c
+@@ -51,7 +51,7 @@ struct r9a06g032_clkdesc {
+ 			u16 sel, g1, r1, g2, r2;
+ 		} dual;
+ 	};
+-} __packed;
++};
+ 
+ #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
+ 	{ .gate = _clk, .reset = _rst, \
+--
+2.27.0
+
diff --git a/queue-4.19/dm-table-remove-bug_on-in_interrupt.patch b/queue-4.19/dm-table-remove-bug_on-in_interrupt.patch
new file mode 100644
index 00000000000..7079c34bfe5
--- /dev/null
+++ b/queue-4.19/dm-table-remove-bug_on-in_interrupt.patch
@@ -0,0 +1,45 @@
+From ac8610a81632fe9f5a54c52200ac61dffae214da Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 13 Nov 2020 15:19:10 +0100
+Subject: dm table: Remove BUG_ON(in_interrupt())
+
+From: Thomas Gleixner
+
+[ Upstream commit e7b624183d921b49ef0a96329f21647d38865ee9 ]
+
+The BUG_ON(in_interrupt()) in dm_table_event() is a historic leftover from
+a rework of the dm table code which changed the calling context.
+
+Issuing a BUG for a wrong calling context is frowned upon, and
+in_interrupt() is deprecated and only covers parts of the wrong
+contexts. The sanity check for the context is covered by
+CONFIG_DEBUG_ATOMIC_SLEEP and other debug facilities already.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Mike Snitzer
+Signed-off-by: Sasha Levin
+---
+ drivers/md/dm-table.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 36275c59e4e7b..f849db3035a05 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1336,12 +1336,6 @@ void dm_table_event_callback(struct dm_table *t,
+ 
+ void dm_table_event(struct dm_table *t)
+ {
+-	/*
+-	 * You can no longer call dm_table_event() from interrupt
+-	 * context, use a bottom half instead.
+-	 */
+-	BUG_ON(in_interrupt());
+-
+ 	mutex_lock(&_event_lock);
+ 	if (t->event_fn)
+ 		t->event_fn(t->event_context);
+--
+2.27.0
+
diff --git a/queue-4.19/drm-tegra-replace-idr_init-by-idr_init_base.patch b/queue-4.19/drm-tegra-replace-idr_init-by-idr_init_base.patch
new file mode 100644
index 00000000000..87829325da7
--- /dev/null
+++ b/queue-4.19/drm-tegra-replace-idr_init-by-idr_init_base.patch
@@ -0,0 +1,39 @@
+From 046de3e72d911f243809b8378af74d08247306e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 5 Nov 2020 23:29:28 +0530
+Subject: drm/tegra: replace idr_init() by idr_init_base()
+
+From: Deepak R Varma
+
+[ Upstream commit 41f71629b4c432f8dd47d70ace813be5f79d4d75 ]
+
+idr_init() uses base 0, which is an invalid identifier for this driver.
+The new function idr_init_base() allows the IDR to set the ID lookup
+from base 1. This avoids all lookups that otherwise start from 0, since
+0 is always unused.
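+
+For reference, a sketch of the convention (illustrative only, not taken
+from the driver):
+
+	struct idr ctx_idr;
+
+	idr_init_base(&ctx_idr, 1);	/* lookups start from ID 1 */
+	id = idr_alloc(&ctx_idr, ptr, 1, 0, GFP_KERNEL);	/* id >= 1 */
+
+Declaring base 1 lets the IDR skip the always-empty slot 0 on every
+lookup when IDs are only ever allocated starting from 1.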
+
+References: commit 6ce711f27500 ("idr: Make 1-based IDRs more efficient")
+
+Signed-off-by: Deepak R Varma
+Signed-off-by: Thierry Reding
+Signed-off-by: Sasha Levin
+---
+ drivers/gpu/drm/tegra/drm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index a2bd5876c6335..00808a3d67832 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -242,7 +242,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+ 	if (!fpriv)
+ 		return -ENOMEM;
+ 
+-	idr_init(&fpriv->contexts);
++	idr_init_base(&fpriv->contexts, 1);
+ 	mutex_init(&fpriv->lock);
+ 	filp->driver_priv = fpriv;
+ 
+--
+2.27.0
+
diff --git a/queue-4.19/drm-tegra-sor-disable-clocks-on-error-in-tegra_sor_i.patch b/queue-4.19/drm-tegra-sor-disable-clocks-on-error-in-tegra_sor_i.patch
new file mode 100644
index 00000000000..9a8d0f9c143
--- /dev/null
+++ b/queue-4.19/drm-tegra-sor-disable-clocks-on-error-in-tegra_sor_i.patch
@@ -0,0 +1,52 @@
+From b4aa7e5e13d34eca4999a7f186a4b635e2b9027c Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 30 Oct 2020 09:34:24 +0800
+Subject: drm/tegra: sor: Disable clocks on error in tegra_sor_init()
+
+From: Qinglang Miao
+
+[ Upstream commit bf3a3cdcad40e5928a22ea0fd200d17fd6d6308d ]
+
+Fix the missing clk_disable_unprepare() before return from
+tegra_sor_init() in the error handling case.
+
+Signed-off-by: Qinglang Miao
+Signed-off-by: Thierry Reding
+Signed-off-by: Sasha Levin
+---
+ drivers/gpu/drm/tegra/sor.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index 89cb70da2bfe6..83108e2430501 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -2668,17 +2668,23 @@ static int tegra_sor_init(struct host1x_client *client)
+ 		if (err < 0) {
+ 			dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ 				err);
++			clk_disable_unprepare(sor->clk);
+ 			return err;
+ 		}
+ 	}
+ 
+ 	err = clk_prepare_enable(sor->clk_safe);
+-	if (err < 0)
++	if (err < 0) {
++		clk_disable_unprepare(sor->clk);
+ 		return err;
++	}
+ 
+ 	err = clk_prepare_enable(sor->clk_dp);
+-	if (err < 0)
++	if (err < 0) {
++		clk_disable_unprepare(sor->clk_safe);
++		clk_disable_unprepare(sor->clk);
+ 		return err;
++	}
+ 
+ 	return 0;
+ }
+--
+2.27.0
+
diff --git a/queue-4.19/gpio-eic-sprd-break-loop-when-getting-null-device-re.patch b/queue-4.19/gpio-eic-sprd-break-loop-when-getting-null-device-re.patch
new file mode 100644
index 00000000000..1eaab67e9a5
--- /dev/null
+++ b/queue-4.19/gpio-eic-sprd-break-loop-when-getting-null-device-re.patch
@@ -0,0 +1,40 @@
+From c6c7ef3f6797a14c01ec1c326d60c8e0a0ee5eab Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 9 Dec 2020 13:51:06 +0800
+Subject: gpio: eic-sprd: break loop when getting NULL device resource
+
+From: Chunyan Zhang
+
+[ Upstream commit 263ade7166a2e589c5b605272690c155c0637dcb ]
+
+EIC controllers have a varying number of banks on different Spreadtrum
+SoCs, and each bank has its own base address. The loop in the driver
+that fetches the base addresses should break as soon as
+platform_get_resource() returns NULL, since all later banks would be
+NULL as well even if the loop continued.
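+
+The resulting probe loop, in sketch form (simplified from the driver;
+the bank-count bound is assumed):
+
+	for (i = 0; i < SPRD_EIC_MAX_BANK; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			break;	/* later banks cannot exist either */
+
+		sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
+	}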
+
+Fixes: 25518e024e3a ("gpio: Add Spreadtrum EIC driver support")
+Signed-off-by: Chunyan Zhang
+Link: https://lore.kernel.org/r/20201209055106.840100-1-zhang.lyra@gmail.com
+Signed-off-by: Linus Walleij
+Signed-off-by: Sasha Levin
+---
+ drivers/gpio/gpio-eic-sprd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
+index 4935cda5301ea..4f1af323ec03b 100644
+--- a/drivers/gpio/gpio-eic-sprd.c
++++ b/drivers/gpio/gpio-eic-sprd.c
+@@ -599,7 +599,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
+ 		 */
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ 		if (!res)
+-			continue;
++			break;
+ 
+ 		sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ 		if (IS_ERR(sprd_eic->base[i]))
+--
+2.27.0
+
diff --git a/queue-4.19/gpio-mvebu-fix-potential-user-after-free-on-probe.patch b/queue-4.19/gpio-mvebu-fix-potential-user-after-free-on-probe.patch
new file mode 100644
index 00000000000..c36f4bb98ac
--- /dev/null
+++ b/queue-4.19/gpio-mvebu-fix-potential-user-after-free-on-probe.patch
@@ -0,0 +1,69 @@
+From 42e2c01aa9c8fdd49e9eaff98c3a75fe83321edb Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 2 Dec 2020 09:15:32 +0200
+Subject: gpio: mvebu: fix potential user-after-free on probe
+
+From: Baruch Siach
+
+[ Upstream commit 7ee1a01e47403f72b9f38839a737692f6991263e ]
+
+When mvebu_pwm_probe() fails, the IRQ domain is not released. Move the
+pwm probe before the IRQ domain allocation. Add pwm cleanup code to the
+failure path.
+
+Fixes: 757642f9a584 ("gpio: mvebu: Add limited PWM support")
+Reported-by: Andrew Lunn
+Signed-off-by: Baruch Siach
+Signed-off-by: Bartosz Golaszewski
+Signed-off-by: Sasha Levin
+---
+ drivers/gpio/gpio-mvebu.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index adc768f908f1a..3b78dcda47364 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -1191,6 +1191,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ 
+ 	devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+ 
++	/* Some MVEBU SoCs have simple PWM support for GPIO lines */
++	if (IS_ENABLED(CONFIG_PWM)) {
++		err = mvebu_pwm_probe(pdev, mvchip, id);
++		if (err)
++			return err;
++	}
++
+ 	/* Some gpio controllers do not provide irq support */
+ 	if (!have_irqs)
+ 		return 0;
+@@ -1200,7 +1207,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ 	if (!mvchip->domain) {
+ 		dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ 			mvchip->chip.label);
+-		return -ENODEV;
++		err = -ENODEV;
++		goto err_pwm;
+ 	}
+ 
+ 	err = irq_alloc_domain_generic_chips(
+@@ -1248,14 +1256,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ 			 mvchip);
+ 	}
+ 
+-	/* Some MVEBU SoCs have simple PWM support for GPIO lines */
+-	if (IS_ENABLED(CONFIG_PWM))
+-		return mvebu_pwm_probe(pdev, mvchip, id);
+-
+ 	return 0;
+ 
+ err_domain:
+ 	irq_domain_remove(mvchip->domain);
++err_pwm:
++	pwmchip_remove(&mvchip->mvpwm->chip);
+ 
+ 	return err;
+ }
+--
+2.27.0
+
diff --git a/queue-4.19/ixgbe-avoid-premature-rx-buffer-reuse.patch b/queue-4.19/ixgbe-avoid-premature-rx-buffer-reuse.patch
new file mode 100644
index 00000000000..8c0d61bc9bc
--- /dev/null
+++ b/queue-4.19/ixgbe-avoid-premature-rx-buffer-reuse.patch
@@ -0,0 +1,114 @@
+From 73b596303008259209ffb101ca6e91c79e013d91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 25 Aug 2020 19:27:35 +0200
+Subject: ixgbe: avoid premature Rx buffer reuse
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Björn Töpel
+
+[ Upstream commit a06316dc87bdc000f7f39a315476957af2ba0f05 ]
+
+The page recycle code incorrectly relied on a page fragment not being
+freed inside xdp_do_redirect(). Under this assumption, page fragments
+that are used by the stack/XDP redirect can be reused and overwritten.
+
+To avoid this, store the page count prior to invoking
+xdp_do_redirect().
+
+Fixes: 6453073987ba ("ixgbe: add initial support for xdp redirect")
+Reported-and-analyzed-by: Li RongQing
+Signed-off-by: Björn Töpel
+Tested-by: Sandeep Penigalapati
+Signed-off-by: Tony Nguyen
+Signed-off-by: Sasha Levin
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24 +++++++++++++------
+ 1 file changed, 17 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 4243ff4ec4b1d..faee77fa08044 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1943,7 +1943,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
+ 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
++static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
++				    int rx_buffer_pgcnt)
+ {
+ 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ 	struct page *page = rx_buffer->page;
+@@ -1954,7 +1955,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+ 
+ #if (PAGE_SIZE < 8192)
+ 	/* if we are only owner of page we can reuse it */
+-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
++	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ 		return false;
+ #else
+ 	/* The last offset is a bit aggressive in that we assume the
+@@ -2019,11 +2020,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+ 						   union ixgbe_adv_rx_desc *rx_desc,
+ 						   struct sk_buff **skb,
+-						   const unsigned int size)
++						   const unsigned int size,
++						   int *rx_buffer_pgcnt)
+ {
+ 	struct ixgbe_rx_buffer *rx_buffer;
+ 
+ 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
++	*rx_buffer_pgcnt =
++#if (PAGE_SIZE < 8192)
++		page_count(rx_buffer->page);
++#else
++		0;
++#endif
+ 	prefetchw(rx_buffer->page);
+ 	*skb = rx_buffer->skb;
+ 
+@@ -2053,9 +2061,10 @@ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+ 
+ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+ 				struct ixgbe_rx_buffer *rx_buffer,
+-				struct sk_buff *skb)
++				struct sk_buff *skb,
++				int rx_buffer_pgcnt)
+ {
+-	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
++	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ 		/* hand second half of page back to the ring */
+ 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ 	} else {
+@@ -2299,6 +2308,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+ 		union ixgbe_adv_rx_desc *rx_desc;
+ 		struct ixgbe_rx_buffer *rx_buffer;
+ 		struct sk_buff *skb;
++		int rx_buffer_pgcnt;
+ 		unsigned int size;
+ 
+ 		/* return some buffers to hardware, one at a time is too slow */
+@@ -2318,7 +2328,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+ 		 */
+ 		dma_rmb();
+ 
+-		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
++		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
+ 
+ 		/* retrieve a buffer from the ring */
+ 		if (!skb) {
+@@ -2360,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+ 			break;
+ 		}
+ 
+-		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
++		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ 		cleaned_count++;
+ 
+ 		/* place incomplete frames back on ring for completion */
+--
+2.27.0
+
diff --git a/queue-4.19/kernel-cpu-add-arch-override-for-clear_tasks_mm_cpum.patch b/queue-4.19/kernel-cpu-add-arch-override-for-clear_tasks_mm_cpum.patch
new file mode 100644
index 00000000000..f2d1d0b11ed
--- /dev/null
+++ b/queue-4.19/kernel-cpu-add-arch-override-for-clear_tasks_mm_cpum.patch
@@ -0,0 +1,55 @@
+From e3fcac12013ddffa1e1fde28a4bd89fe11ec4092 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 26 Nov 2020 20:25:29 +1000
+Subject: kernel/cpu: add arch override for clear_tasks_mm_cpumask() mm handling
+
+From: Nicholas Piggin
+
+[ Upstream commit 8ff00399b153440c1c83e20c43020385b416415b ]
+
+powerpc/64s keeps a counter in the mm which counts bits set in
+mm_cpumask as well as other things. This means it can't use the generic
+code to clear bits out of the mask, as that doesn't adjust the
+arch-specific counter.
+
+Add an arch override that allows powerpc/64s to use
+clear_tasks_mm_cpumask().
+
+Signed-off-by: Nicholas Piggin
+Reviewed-by: Aneesh Kumar K.V
+Acked-by: Peter Zijlstra (Intel)
+Signed-off-by: Michael Ellerman
+Link: https://lore.kernel.org/r/20201126102530.691335-4-npiggin@gmail.com
+Signed-off-by: Sasha Levin
+---
+ kernel/cpu.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 08b9d6ba0807f..9a39a24f60253 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -776,6 +776,10 @@ void __init cpuhp_threads_init(void)
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
++#ifndef arch_clear_mm_cpumask_cpu
++#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
++#endif
++
+ /**
+  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+  * @cpu: a CPU id
+@@ -811,7 +815,7 @@ void clear_tasks_mm_cpumask(int cpu)
+ 		t = find_lock_task_mm(p);
+ 		if (!t)
+ 			continue;
+-		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
++		arch_clear_mm_cpumask_cpu(cpu, t->mm);
+ 		task_unlock(t);
+ 	}
+ 	rcu_read_unlock();
+--
+2.27.0
+
diff --git a/queue-4.19/netfilter-x_tables-switch-synchronization-to-rcu.patch b/queue-4.19/netfilter-x_tables-switch-synchronization-to-rcu.patch
new file mode 100644
index 00000000000..8910659f89a
--- /dev/null
+++ b/queue-4.19/netfilter-x_tables-switch-synchronization-to-rcu.patch
@@ -0,0 +1,394 @@
+From ddb07b21ddd74fb99398a4e5ad03b277f7d83dcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 25 Nov 2020 11:27:22 -0700
+Subject: netfilter: x_tables: Switch synchronization to RCU
+
+From: Subash Abhinov Kasiviswanathan
+
+[ Upstream commit cc00bcaa589914096edef7fb87ca5cee4a166b5c ]
+
+When running concurrent iptables rules replacement with data, the per CPU
+sequence count is checked after the assignment of the new information.
+The sequence count is used to synchronize with the packet path without the
+use of any explicit locking. If there are any packets in the packet path using
+the table information, the sequence count is incremented to an odd value and
+is incremented to an even value after the packet processing completes.
+
+The new table value assignment is followed by a write memory barrier so every
+CPU should see the latest value. If the packet path has started with the old
+table information, the sequence counter will be odd and the iptables
+replacement will wait till the sequence count is even prior to freeing the
+old table info.
+
+However, this assumes that the new table information assignment and the memory
+barrier is actually executed prior to the counter check in the replacement
+thread. If the CPU decides to execute the assignment later, as there is no user
+of the table information prior to the sequence check, the packet path in
+another CPU may use the old table information. The replacement thread would
+then free the table information under it, leading to a use after free in the
+packet processing context:
+
+Unable to handle kernel NULL pointer dereference at virtual
+address 000000000000008e
+pc : ip6t_do_table+0x5d0/0x89c
+lr : ip6t_do_table+0x5b8/0x89c
+ip6t_do_table+0x5d0/0x89c
+ip6table_filter_hook+0x24/0x30
+nf_hook_slow+0x84/0x120
+ip6_input+0x74/0xe0
+ip6_rcv_finish+0x7c/0x128
+ipv6_rcv+0xac/0xe4
+__netif_receive_skb+0x84/0x17c
+process_backlog+0x15c/0x1b8
+napi_poll+0x88/0x284
+net_rx_action+0xbc/0x23c
+__do_softirq+0x20c/0x48c
+
+This could be fixed by forcing instruction order after the new table
+information assignment or by switching to RCU for the synchronization.
+
+Fixes: 80055dab5de0 ("netfilter: x_tables: make xt_replace_table wait until old rules are not used anymore")
+Reported-by: Sean Tranchetti
+Reported-by: kernel test robot
+Suggested-by: Florian Westphal
+Signed-off-by: Subash Abhinov Kasiviswanathan
+Signed-off-by: Pablo Neira Ayuso
+Signed-off-by: Sasha Levin
+---
+ include/linux/netfilter/x_tables.h |  5 ++-
+ net/ipv4/netfilter/arp_tables.c    | 14 ++++-----
+ net/ipv4/netfilter/ip_tables.c     | 14 ++++-----
+ net/ipv6/netfilter/ip6_tables.c    | 14 ++++-----
+ net/netfilter/x_tables.c           | 49 +++++++++---------------------
+ 5 files changed, 40 insertions(+), 56 deletions(-)
+
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 9077b3ebea08c..728d7716bf4f4 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -227,7 +227,7 @@ struct xt_table {
+ 	unsigned int valid_hooks;
+ 
+ 	/* Man behind the curtain... */
+-	struct xt_table_info *private;
++	struct xt_table_info __rcu *private;
+ 
+ 	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ 	struct module *me;
+@@ -449,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+ 
+ struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+ 
++struct xt_table_info
++*xt_table_get_private_protected(const struct xt_table *table);
++
+ #ifdef CONFIG_COMPAT
+ #include
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 10d8f95eb7712..ca20efe775ee4 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -202,7 +202,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
+ 
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = READ_ONCE(table->private); /* Address dependency. */
++	private = rcu_access_pointer(table->private);
+ 	cpu     = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
+@@ -648,7 +648,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	 * (other than comefrom, which userspace doesn't care
+@@ -672,7 +672,7 @@ static int copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct arpt_entry *e;
+ 	struct xt_counters *counters;
+-	struct xt_table_info *private = table->private;
++	struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 
+@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user,
+ 	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+ 	if (!IS_ERR(t)) {
+ 		struct arpt_getinfo info;
+-		const struct xt_table_info *private = t->private;
++		const struct xt_table_info *private = xt_table_get_private_protected(t);
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = t->private;
++		const struct xt_table_info *private = xt_table_get_private_protected(t);
+ 
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+@@ -1019,7 +1019,7 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 	}
+ 
+ 	local_bh_disable();
+-	private = t->private;
++	private = xt_table_get_private_protected(t);
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1356,7 +1356,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
+ 				       void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index e77872c93c206..115d48049686f 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -261,7 +261,7 @@ ipt_do_table(struct sk_buff *skb,
+ 	WARN_ON(!(table->valid_hooks & (1 << hook)));
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = READ_ONCE(table->private); /* Address dependency. */
++	private = rcu_access_pointer(table->private);
+ 	cpu        = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
+@@ -794,7 +794,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	   (other than comefrom, which userspace doesn't care
+@@ -818,7 +818,7 @@ copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct ipt_entry *e;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 
+@@ -968,7 +968,7 @@ static int get_info(struct net *net, void __user *user,
+ 	t = xt_request_find_table_lock(net, AF_INET, name);
+ 	if (!IS_ERR(t)) {
+ 		struct ipt_getinfo info;
+-		const struct xt_table_info *private = t->private;
++		const struct xt_table_info *private = xt_table_get_private_protected(t);
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -1022,7 +1022,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, AF_INET, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = t->private;
++		const struct xt_table_info *private = xt_table_get_private_protected(t);
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+ 						   t, uptr->entrytable);
+@@ -1178,7 +1178,7 @@ do_add_counters(struct net *net, const void __user *user,
+ 	}
+ 
+ 	local_bh_disable();
+-	private = t->private;
++	private = xt_table_get_private_protected(t);
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1573,7 +1573,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
+ 			    void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index daf2e9e9193d1..b1441349e1517 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -283,7 +283,7 @@ ip6t_do_table(struct sk_buff *skb,
+ 
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = READ_ONCE(table->private); /* Address dependency. */
++	private = rcu_access_pointer(table->private);
+ 	cpu        = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
+@@ -810,7 +810,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	   (other than comefrom, which userspace doesn't care
+@@ -834,7 +834,7 @@ copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct ip6t_entry *e;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 
+@@ -984,7 +984,7 @@ static int get_info(struct net *net, void __user *user,
+ 	t = xt_request_find_table_lock(net, AF_INET6, name);
+ 	if (!IS_ERR(t)) {
+ 		struct ip6t_getinfo info;
+-		const struct xt_table_info *private = t->private;
++		const struct xt_table_info *private = xt_table_get_private_protected(t);
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -1039,7 +1039,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, AF_INET6, get.name);
+ 	if (!IS_ERR(t)) {
+-		struct xt_table_info *private = t->private;
++		struct xt_table_info *private = xt_table_get_private_protected(t);
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+ 						   t, uptr->entrytable);
+@@ -1194,7 +1194,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 	}
+ 
+ 	local_bh_disable();
+-	private = t->private;
++	private = xt_table_get_private_protected(t);
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1582,7 +1582,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
+ 			    void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = table->private;
++	const struct xt_table_info *private = xt_table_get_private_protected(table);
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 3bab89dbc3717..6a7d0303d058f 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1354,6 +1354,14 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
+ }
+ EXPORT_SYMBOL(xt_counters_alloc);
+ 
++struct xt_table_info
++*xt_table_get_private_protected(const struct xt_table *table)
++{
++	return rcu_dereference_protected(table->private,
++					 mutex_is_locked(&xt[table->af].mutex));
++}
++EXPORT_SYMBOL(xt_table_get_private_protected);
++
+ struct xt_table_info *
+ xt_replace_table(struct xt_table *table,
+ 	      unsigned int num_counters,
+@@ -1361,7 +1369,6 @@ xt_replace_table(struct xt_table *table,
+ 	      int *error)
+ {
+ 	struct xt_table_info *private;
+-	unsigned int cpu;
+ 	int ret;
+ 
+ 	ret = xt_jumpstack_alloc(newinfo);
+@@ -1371,47 +1378,20 @@ xt_replace_table(struct xt_table *table,
+ 	}
+ 
+ 	/* Do the substitution. */
+-	local_bh_disable();
+-	private = table->private;
++	private = xt_table_get_private_protected(table);
+ 
+ 	/* Check inside lock: is the old number correct? */
+ 	if (num_counters != private->number) {
+ 		pr_debug("num_counters != table->private->number (%u/%u)\n",
+ 			 num_counters, private->number);
+-		local_bh_enable();
+ 		*error = -EAGAIN;
+ 		return NULL;
+ 	}
+ 
+ 	newinfo->initial_entries = private->initial_entries;
+-	/*
+-	 * Ensure contents of newinfo are visible before assigning to
+-	 * private.
+-	 */
+-	smp_wmb();
+-	table->private = newinfo;
+-
+-	/* make sure all cpus see new ->private value */
+-	smp_wmb();
+ 
+-	/*
+-	 * Even though table entries have now been swapped, other CPU's
+-	 * may still be using the old entries...
+-	 */
+-	local_bh_enable();
+-
+-	/* ... so wait for even xt_recseq on all cpus */
+-	for_each_possible_cpu(cpu) {
+-		seqcount_t *s = &per_cpu(xt_recseq, cpu);
+-		u32 seq = raw_read_seqcount(s);
+-
+-		if (seq & 1) {
+-			do {
+-				cond_resched();
+-				cpu_relax();
+-			} while (seq == raw_read_seqcount(s));
+-		}
+-	}
++	rcu_assign_pointer(table->private, newinfo);
++	synchronize_rcu();
+ 
+ #ifdef CONFIG_AUDIT
+ 	if (audit_enabled) {
+@@ -1452,12 +1432,12 @@ struct xt_table *xt_register_table(struct net *net,
+ 	}
+ 
+ 	/* Simplifies replace_table code. */
+-	table->private = bootstrap;
++	rcu_assign_pointer(table->private, bootstrap);
+ 
+ 	if (!xt_replace_table(table, 0, newinfo, &ret))
+ 		goto unlock;
+ 
+-	private = table->private;
++	private = xt_table_get_private_protected(table);
+ 	pr_debug("table->private->number = %u\n", private->number);
+ 
+ 	/* save number of initial entries */
+@@ -1480,7 +1460,8 @@ void *xt_unregister_table(struct xt_table *table)
+ 	struct xt_table_info *private;
+ 
+ 	mutex_lock(&xt[table->af].mutex);
+-	private = table->private;
++	private = xt_table_get_private_protected(table);
++	RCU_INIT_POINTER(table->private, NULL);
+ 	list_del(&table->list);
+ 	mutex_unlock(&xt[table->af].mutex);
+ 	kfree(table);
+--
+2.27.0
+
diff --git a/queue-4.19/pinctrl-baytrail-avoid-clearing-debounce-value-when-.patch b/queue-4.19/pinctrl-baytrail-avoid-clearing-debounce-value-when-.patch
new file mode 100644
index 00000000000..163f44fefe2
--- /dev/null
+++ b/queue-4.19/pinctrl-baytrail-avoid-clearing-debounce-value-when-.patch
@@ -0,0 +1,72 @@
+From b7a839d3b9d8c24f7dc9f207a6b7f0c11a244da6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 12 Nov 2020 21:03:01 +0200
+Subject: pinctrl: baytrail: Avoid clearing debounce value when turning it off
+
+From: Andy Shevchenko
+
+[ Upstream commit 0b74e40a4e41f3cbad76dff4c50850d47b525b26 ]
+
+Baytrail pin control has a common register to set up the debounce
+timeout. When a pin configuration requests debounce to be disabled, the
+rest of the pins may still want to have debounce enabled and thus rely
+on the common timeout value. Avoid clearing the debounce value when
+turning it off for one pin while others may still use it.
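+
+In sketch form (simplified from the hunk below), the shared
+BYT_DEBOUNCE_PULSE_MASK field is now rewritten only when a new timeout
+is actually being programmed:
+
+	case 375:
+		debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+		debounce |= BYT_DEBOUNCE_PULSE_375US;
+		break;
+
+rather than being cleared unconditionally right after reading the
+debounce register.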
+
+Fixes: 658b476c742f ("pinctrl: baytrail: Add debounce configuration")
+Depends-on: 04ff5a095d66 ("pinctrl: baytrail: Rectify debounce support")
+Depends-on: 827e1579e1d5 ("pinctrl: baytrail: Rectify debounce support (part 2)")
+Signed-off-by: Andy Shevchenko
+Acked-by: Mika Westerberg
+Signed-off-by: Sasha Levin
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 1b00a3f3b419c..b3d478edbbb1c 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -1258,7 +1258,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ 		break;
+ 	case PIN_CONFIG_INPUT_DEBOUNCE:
+ 		debounce = readl(db_reg);
+-		debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 
+ 		if (arg)
+ 			conf |= BYT_DEBOUNCE_EN;
+@@ -1267,24 +1266,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ 
+ 		switch (arg) {
+ 		case 375:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_375US;
+ 			break;
+ 		case 750:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_750US;
+ 			break;
+ 		case 1500:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_1500US;
+ 			break;
+ 		case 3000:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_3MS;
+ 			break;
+ 		case 6000:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_6MS;
+ 			break;
+ 		case 12000:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_12MS;
+ 			break;
+ 		case 24000:
++			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ 			debounce |= BYT_DEBOUNCE_PULSE_24MS;
+ 			break;
+ 		default:
+--
+2.27.0
+
diff --git a/queue-4.19/pinctrl-merrifield-set-default-bias-in-case-no-parti.patch b/queue-4.19/pinctrl-merrifield-set-default-bias-in-case-no-parti.patch
new file mode 100644
index 00000000000..04d3e57ccbd
--- /dev/null
+++ b/queue-4.19/pinctrl-merrifield-set-default-bias-in-case-no-parti.patch
@@ -0,0 +1,58 @@
+From 2436cd52d5b9c4ef0b00fa16fca91407fcaa4ea1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 11 Nov 2020 14:06:05 +0200
+Subject: pinctrl: merrifield: Set default bias in case no particular value given
+
+From: Andy Shevchenko
+
+[ Upstream commit 0fa86fc2e28227f1e64f13867e73cf864c6d25ad ]
+
+When the GPIO library asks pin control to set the bias, it doesn't pass
+any value for it and the argument is considered boolean (and this is
+true for ACPI GpioIo() / GpioInt() resources, by the way). Thus,
+individual drivers must behave well when they get a resistance value of
+1 Ohm, i.e. transform it into a sane default.
+
+In the case of the Intel Merrifield pin control hardware, 20 kOhm sounds
+plausible because it gives a good trade-off between weakness and
+minimization of the leakage current (it will be only 50 uA with the
+above choice).
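+
+In sketch form, the convention being handled is (from the hunk below):
+
+	/* the GPIO library passes arg == 1 (i.e. 1 Ohm) when bias is
+	 * simply "enabled" with no particular resistance requested */
+	if (arg == 1)
+		arg = 20000;	/* default to 20 kOhm */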
+
+Fixes: 4e80c8f50574 ("pinctrl: intel: Add Intel Merrifield pin controller support")
+Depends-on: 2956b5d94a76 ("pinctrl / gpio: Introduce .set_config() callback for GPIO chips")
+Signed-off-by: Andy Shevchenko
+Acked-by: Mika Westerberg
+Signed-off-by: Sasha Levin
+---
+ drivers/pinctrl/intel/pinctrl-merrifield.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
+index 4fa69f988c7b7..6b2312e73f23f 100644
+--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
+@@ -729,6 +729,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
+ 		mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
+ 		bits |= BUFCFG_PU_EN;
+ 
++		/* Set default strength value in case none is given */
++		if (arg == 1)
++			arg = 20000;
++
+ 		switch (arg) {
+ 		case 50000:
+ 			bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
+@@ -749,6 +753,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
+ 		mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
+ 		bits |= BUFCFG_PD_EN;
+ 
++		/* Set default strength value in case none is given */
++		if (arg == 1)
++			arg = 20000;
++
+ 		switch (arg) {
+ 		case 50000:
+ 			bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
+--
+2.27.0
+
diff --git a/queue-4.19/rdma-cm-fix-an-attempt-to-use-non-valid-pointer-when.patch b/queue-4.19/rdma-cm-fix-an-attempt-to-use-non-valid-pointer-when.patch
new file mode 100644
index 00000000000..6478a5ce3f4
--- /dev/null
+++ b/queue-4.19/rdma-cm-fix-an-attempt-to-use-non-valid-pointer-when.patch
@@ -0,0 +1,79 @@
+From b7a7e0cdca20535144ffd8e273afbb589e046cc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 4 Dec 2020 08:42:05 +0200
+Subject: RDMA/cm: Fix an attempt to use non-valid pointer when cleaning timewait
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Leon Romanovsky
+
+[ Upstream commit 340b940ea0ed12d9adbb8f72dea17d516b2019e8 ]
+
+If cm_create_timewait_info() fails, the timewait_info pointer will contain
+an error value and will be used in cm_remove_remote() later.
+
+  general protection fault, probably for non-canonical address 0xdffffc0000000024: 0000 [#1] SMP KASAN PTI
+  KASAN: null-ptr-deref in range [0x0000000000000120-0x0000000000000127]
+  CPU: 2 PID: 12446 Comm: syz-executor.3 Not tainted 5.10.0-rc5-5d4c0742a60e #27
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+  RIP: 0010:cm_remove_remote.isra.0+0x24/0x170 drivers/infiniband/core/cm.c:978
+  Code: 84 00 00 00 00 00 41 54 55 53 48 89 fb 48 8d ab 2d 01 00 00 e8 7d bf 4b fe 48 89 ea 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <0f> b6 04 02 48 89 ea 83 e2 07 38 d0 7f 08 84 c0 0f 85 fc 00 00 00
+  RSP: 0018:ffff888013127918 EFLAGS: 00010006
+  RAX: dffffc0000000000 RBX: fffffffffffffff4 RCX: ffffc9000a18b000
+  RDX: 0000000000000024 RSI: ffffffff82edc573 RDI: fffffffffffffff4
+  RBP: 0000000000000121 R08: 0000000000000001 R09: ffffed1002624f1d
+  R10: 0000000000000003 R11: ffffed1002624f1c R12: ffff888107760c70
+  R13: ffff888107760c40 R14: fffffffffffffff4 R15: ffff888107760c9c
+  FS:  00007fe1ffcc1700(0000) GS:ffff88811a600000(0000) knlGS:0000000000000000
+  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+  CR2: 0000001b2ff21000 CR3: 000000010f504001 CR4: 0000000000370ee0
+  DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+  DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+  Call Trace:
+   cm_destroy_id+0x189/0x15b0 drivers/infiniband/core/cm.c:1155
+   cma_connect_ib drivers/infiniband/core/cma.c:4029 [inline]
+   rdma_connect_locked+0x1100/0x17c0 drivers/infiniband/core/cma.c:4107
+   rdma_connect+0x2a/0x40 drivers/infiniband/core/cma.c:4140
+   ucma_connect+0x277/0x340 drivers/infiniband/core/ucma.c:1069
+   ucma_write+0x236/0x2f0 drivers/infiniband/core/ucma.c:1724
+   vfs_write+0x220/0x830 fs/read_write.c:603
+   ksys_write+0x1df/0x240 fs/read_write.c:658
+   do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+   entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation")
+Link: https://lore.kernel.org/r/20201204064205.145795-1-leon@kernel.org
+Reviewed-by: Maor Gottlieb
+Reported-by: Amit Matityahu
+Signed-off-by: Leon Romanovsky
+Signed-off-by: Jason Gunthorpe
+Signed-off-by: Sasha Levin
+---
+ drivers/infiniband/core/cm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 4ebf63360a697..9bdb3fd97d264 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1443,6 +1443,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 							    id.local_id);
+ 	if (IS_ERR(cm_id_priv->timewait_info)) {
+ 		ret = PTR_ERR(cm_id_priv->timewait_info);
++		cm_id_priv->timewait_info = NULL;
+ 		goto out;
+ 	}
+ 
+@@ -1969,6 +1970,7 @@ static int cm_req_handler(struct cm_work *work)
+ 							    id.local_id);
+ 	if (IS_ERR(cm_id_priv->timewait_info)) {
+ 		ret = PTR_ERR(cm_id_priv->timewait_info);
++		cm_id_priv->timewait_info = NULL;
+ 		goto destroy;
+ 	}
+ 	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
+--
+2.27.0
+
diff --git a/queue-4.19/scsi-bnx2i-requires-mmu.patch b/queue-4.19/scsi-bnx2i-requires-mmu.patch
new file mode 100644
index 00000000000..a0bd2a64106
--- /dev/null
+++ b/queue-4.19/scsi-bnx2i-requires-mmu.patch
@@ -0,0 +1,52 @@
+From 693e88c92e0b73d569fcb41fbd259631589ded8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Sat, 28 Nov 2020 23:09:16 -0800
+Subject: scsi: bnx2i: Requires MMU
+
+From: Randy Dunlap
+
+[ Upstream commit 2d586494c4a001312650f0b919d534e429dd1e09 ]
+
+The SCSI_BNX2_ISCSI kconfig symbol selects CNIC and CNIC selects UIO,
+which depends on MMU.
+
+Since 'select' does not follow dependency chains, add the same MMU
+dependency to SCSI_BNX2_ISCSI.
+
+Quietens this kconfig warning:
+
+WARNING: unmet direct dependencies detected for CNIC
+  Depends on [n]: NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_BROADCOM [=y] && PCI [=y] && (IPV6 [=m] || IPV6 [=m]=n) && MMU [=n]
+  Selected by [m]:
+  - SCSI_BNX2_ISCSI [=m] && SCSI_LOWLEVEL [=y] && SCSI [=y] && NET [=y] && PCI [=y] && (IPV6 [=m] || IPV6 [=m]=n)
+
+Link: https://lore.kernel.org/r/20201129070916.3919-1-rdunlap@infradead.org
+Fixes: cf4e6363859d ("[SCSI] bnx2i: Add bnx2i iSCSI driver.")
+Cc: linux-scsi@vger.kernel.org
+Cc: Nilesh Javali
+Cc: Manish Rangankar
+Cc: GR-QLogic-Storage-Upstream@marvell.com
+Cc: "James E.J. Bottomley"
+Cc: "Martin K. Petersen"
+Signed-off-by: Randy Dunlap
+Signed-off-by: Martin K. Petersen
+Signed-off-by: Sasha Levin
+---
+ drivers/scsi/bnx2i/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
+index ba30ff86d5818..b27a3738d940c 100644
+--- a/drivers/scsi/bnx2i/Kconfig
++++ b/drivers/scsi/bnx2i/Kconfig
+@@ -3,6 +3,7 @@ config SCSI_BNX2_ISCSI
+ 	depends on NET
+ 	depends on PCI
+ 	depends on (IPV6 || IPV6=n)
++	depends on MMU
+ 	select SCSI_ISCSI_ATTRS
+ 	select NETDEVICES
+ 	select ETHERNET
+--
+2.27.0
+
diff --git a/queue-4.19/scsi-mpt3sas-increase-iocinit-request-timeout-to-30s.patch b/queue-4.19/scsi-mpt3sas-increase-iocinit-request-timeout-to-30s.patch
new file mode 100644
index 00000000000..bb0190691b3
--- /dev/null
+++ b/queue-4.19/scsi-mpt3sas-increase-iocinit-request-timeout-to-30s.patch
@@ -0,0 +1,38 @@
+From 94eaa92f9fac220d0b4feff4a1a5c841ee1ea969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 30 Nov 2020 13:57:33 +0530
+Subject: scsi: mpt3sas: Increase IOCInit request timeout to 30s
+
+From: Sreekanth Reddy
+
+[ Upstream commit 85dad327d9b58b4c9ce08189a2707167de392d23 ]
+
+Currently the IOCInit request message timeout is set to 10s. This is not
+sufficient in some scenarios, such as during HBA FW downgrade operations.
+
+Increase the IOCInit request timeout to 30s.
+
+Link: https://lore.kernel.org/r/20201130082733.26120-1-sreekanth.reddy@broadcom.com
+Signed-off-by: Sreekanth Reddy
+Signed-off-by: Martin K. Petersen
+Signed-off-by: Sasha Levin
+---
+ drivers/scsi/mpt3sas/mpt3sas_base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 9fbe20e38ad07..07959047d4dc4 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -5771,7 +5771,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
+ 
+ 	r = _base_handshake_req_reply_wait(ioc,
+ 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+-	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
++	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
+ 
+ 	if (r != 0) {
+ 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+--
+2.27.0
+
diff --git a/queue-4.19/selftests-bpf-test_offload.py-reset-ethtool-features.patch b/queue-4.19/selftests-bpf-test_offload.py-reset-ethtool-features.patch
new file mode 100644
index 00000000000..c5bbb709a72
--- /dev/null
+++ b/queue-4.19/selftests-bpf-test_offload.py-reset-ethtool-features.patch
@@ -0,0 +1,43 @@
+From 94799dc16a3ab496ca8679c911e9a3fce2970f23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 9 Dec 2020 14:57:42 +0100
+Subject: selftests/bpf/test_offload.py: Reset ethtool features after failed setting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen
+
+[ Upstream commit 766e62b7fcd2cf1d43e6594ba37c659dc48f7ddb ]
+
+When setting the ethtool feature flag fails (as expected for the test), the
+kernel now tracks that the feature was requested to be 'off' and refuses to
+subsequently disable it again. So reset it back to 'on' so a subsequent
+disable (that's not supposed to fail) can succeed.
+
+Fixes: 417ec26477a5 ("selftests/bpf: add offload test based on netdevsim")
+Signed-off-by: Toke Høiland-Jørgensen
+Signed-off-by: Daniel Borkmann
+Acked-by: Jakub Kicinski
+Link: https://lore.kernel.org/bpf/160752226280.110217.10696241563705667871.stgit@toke.dk
+Signed-off-by: Sasha Levin
+---
+ tools/testing/selftests/bpf/test_offload.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
+index d59642e70f562..2229e55216a97 100755
+--- a/tools/testing/selftests/bpf/test_offload.py
++++ b/tools/testing/selftests/bpf/test_offload.py
+@@ -787,6 +787,7 @@ try:
+     start_test("Test disabling TC offloads is rejected while filters installed...")
+     ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+     fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
++    sim.set_ethtool_tc_offloads(True)
+ 
+     start_test("Test qdisc removal frees things...")
+     sim.tc_flush_filters()
+--
+2.27.0
+
diff --git a/queue-4.19/series b/queue-4.19/series
index 6315447f63d..ce3a916371f 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -52,3 +52,25 @@ arm64-lse-fix-lse-atomics-with-llvm.patch
 arm64-change-.weak-to-sym_func_start_weak_pi-for-arch-arm64-lib-mem-.s.patch
 x86-resctrl-remove-unused-struct-mbm_state-chunks_bw.patch
 x86-resctrl-fix-incorrect-local-bandwidth-when-mba_sc-is-enabled.patch
+pinctrl-merrifield-set-default-bias-in-case-no-parti.patch
+pinctrl-baytrail-avoid-clearing-debounce-value-when-.patch
+arm-dts-sun8i-v3s-fix-gic-node-memory-range.patch
+gpio-mvebu-fix-potential-user-after-free-on-probe.patch
+scsi-bnx2i-requires-mmu.patch
+xsk-fix-xsk_poll-s-return-type.patch
+can-softing-softing_netdev_open-fix-error-handling.patch
+clk-renesas-r9a06g032-drop-__packed-for-portability.patch
+block-factor-out-requeue-handling-from-dispatch-code.patch
+netfilter-x_tables-switch-synchronization-to-rcu.patch
+gpio-eic-sprd-break-loop-when-getting-null-device-re.patch
+selftests-bpf-test_offload.py-reset-ethtool-features.patch
+rdma-cm-fix-an-attempt-to-use-non-valid-pointer-when.patch
+ixgbe-avoid-premature-rx-buffer-reuse.patch
+drm-tegra-replace-idr_init-by-idr_init_base.patch
+kernel-cpu-add-arch-override-for-clear_tasks_mm_cpum.patch
+drm-tegra-sor-disable-clocks-on-error-in-tegra_sor_i.patch
+arm64-syscall-exit-userspace-before-unmasking-except.patch
+vxlan-add-needed_headroom-for-lower-device.patch
+vxlan-copy-needed_tailroom-from-lowerdev.patch
+scsi-mpt3sas-increase-iocinit-request-timeout-to-30s.patch
+dm-table-remove-bug_on-in_interrupt.patch
diff --git a/queue-4.19/vxlan-add-needed_headroom-for-lower-device.patch b/queue-4.19/vxlan-add-needed_headroom-for-lower-device.patch
new file mode 100644
index 00000000000..cec42ff46e4
--- /dev/null
+++ b/queue-4.19/vxlan-add-needed_headroom-for-lower-device.patch
@@ -0,0 +1,51 @@
+From 5d05684752a80cb63b7fb3c793ed6378edfc9cb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 26 Nov 2020 13:52:46 +0100
+Subject: vxlan: Add needed_headroom for lower device
+
+From: Sven Eckelmann
+
+[ Upstream commit 0a35dc41fea67ac4495ce7584406bf9557a6e7d0 ]
+
+It was observed that sending data via batadv over vxlan (on top of
+wireguard) reduced the performance massively compared to raw ethernet or
+batadv on raw ethernet. A check of perf data showed that vxlan_build_skb
+was calling pskb_expand_head all the time to allocate enough headroom for:
+
+  min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+	       + VXLAN_HLEN + iphdr_len;
+
+But vxlan_config_apply only requested needed headroom for:
+
+  lowerdev->hard_header_len + VXLAN6_HEADROOM or VXLAN_HEADROOM
+
+So it completely ignored the needed_headroom of the lower device. The first
+caller of net_dev_xmit could therefore never make sure that enough headroom
+was allocated for the rest of the transmit path.
+
+Cc: Annika Wickert
+Signed-off-by: Sven Eckelmann
+Tested-by: Annika Wickert
+Link: https://lore.kernel.org/r/20201126125247.1047977-1-sven@narfation.org
+Signed-off-by: Jakub Kicinski
+Signed-off-by: Sasha Levin
+---
+ drivers/net/vxlan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index abf85f0ab72fc..8481a21fe7afb 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3180,6 +3180,7 @@ static void vxlan_config_apply(struct net_device *dev,
+ 		dev->gso_max_segs = lowerdev->gso_max_segs;
+ 
+ 		needed_headroom = lowerdev->hard_header_len;
++		needed_headroom += lowerdev->needed_headroom;
+ 
+ 		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
+ 					   VXLAN_HEADROOM);
+--
+2.27.0
+
diff --git a/queue-4.19/vxlan-copy-needed_tailroom-from-lowerdev.patch b/queue-4.19/vxlan-copy-needed_tailroom-from-lowerdev.patch
new file mode 100644
index 00000000000..4f57e60a1f6
--- /dev/null
+++ b/queue-4.19/vxlan-copy-needed_tailroom-from-lowerdev.patch
@@ -0,0 +1,37 @@
+From 67622e85a33a0a9f2a888b8de68fb867965304c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 26 Nov 2020 13:52:47 +0100
+Subject: vxlan: Copy needed_tailroom from lowerdev
+
+From: Sven Eckelmann
+
+[ Upstream commit a5e74021e84bb5eadf760aaf2c583304f02269be ]
+
+While vxlan doesn't need any extra tailroom, the lowerdev might need it. In
+that case, copy it over to reduce the chance for additional (re)allocations
+in the transmit path.
+
+Signed-off-by: Sven Eckelmann
+Link: https://lore.kernel.org/r/20201126125247.1047977-2-sven@narfation.org
+Signed-off-by: Jakub Kicinski
+Signed-off-by: Sasha Levin
+---
+ drivers/net/vxlan.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 8481a21fe7afb..66fffbd64a33f 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3182,6 +3182,8 @@ static void vxlan_config_apply(struct net_device *dev,
+ 		needed_headroom = lowerdev->hard_header_len;
+ 		needed_headroom += lowerdev->needed_headroom;
+ 
++		dev->needed_tailroom = lowerdev->needed_tailroom;
++
+ 		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
+ 					   VXLAN_HEADROOM);
+ 		if (max_mtu < ETH_MIN_MTU)
+--
+2.27.0
+
diff --git a/queue-4.19/xsk-fix-xsk_poll-s-return-type.patch b/queue-4.19/xsk-fix-xsk_poll-s-return-type.patch
new file mode 100644
index 00000000000..0945fd043ea
--- /dev/null
+++ b/queue-4.19/xsk-fix-xsk_poll-s-return-type.patch
@@ -0,0 +1,56 @@
+From c9d89fa4147d403acf80052eeba9b1714264e3cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 20 Nov 2019 01:10:42 +0100
+Subject: xsk: Fix xsk_poll()'s return type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Luc Van Oostenryck
+
+[ Upstream commit 5d946c5abbaf68083fa6a41824dd79e1f06286d8 ]
+
+xsk_poll() is defined as returning 'unsigned int' but the
+.poll method is declared as returning '__poll_t', a bitwise type.
+
+Fix this by using the proper return type and using the EPOLL
+constants instead of the POLL ones, as required for __poll_t.
+
+Signed-off-by: Luc Van Oostenryck
+Signed-off-by: Daniel Borkmann
+Acked-by: Björn Töpel
+Link: https://lore.kernel.org/bpf/20191120001042.30830-1-luc.vanoostenryck@gmail.com
+Signed-off-by: Sasha Levin
+---
+ net/xdp/xsk.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 9ff2ab63e6392..6bb0649c028c4 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -289,17 +289,17 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+ 	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+ }
+ 
+-static unsigned int xsk_poll(struct file *file, struct socket *sock,
++static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ 			     struct poll_table_struct *wait)
+ {
+-	unsigned int mask = datagram_poll(file, sock, wait);
++	__poll_t mask = datagram_poll(file, sock, wait);
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 
+ 	if (xs->rx && !xskq_empty_desc(xs->rx))
+-		mask |= POLLIN | POLLRDNORM;
++		mask |= EPOLLIN | EPOLLRDNORM;
+ 	if (xs->tx && !xskq_full_desc(xs->tx))
+-		mask |= POLLOUT | POLLWRNORM;
++		mask |= EPOLLOUT | EPOLLWRNORM;
+ 
+ 	return mask;
+ }
+--
+2.27.0
+