From b03ff3b1c3d084569ffc8a37f42c00ab7cce9af6 Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Wed, 18 Mar 2020 16:57:34 -0400
Subject: [PATCH] Fixes for 5.5

Signed-off-by: Sasha Levin
---
 ...atchdog-allow-disabling-wdat-at-boot.patch |  74 ++++
 ...atchdog-set-default-timeout-in-probe.patch |  68 ++++
 ...ssthrough-request-into-hctx-dispatch.patch | 181 +++++++++
 ...eg_rule-for-null-in-handle_channel_c.patch |  37 ++
 ...u-fix-memory-leak-during-tdr-test-v2.patch |  43 +++
 ...pport-for-recent-firmware-on-magic-k.patch |  37 ++
 ...ff-call-hid_hw_stop-in-case-of-error.patch |  77 ++++
 ...-fix-general-protection-fault-caused.patch |  56 +++
 ...-fix-race-condition-for-scheduled-wo.patch |  62 +++
 ...trekstor-surfbook-e11b-to-descriptor.patch |  43 +++
 ...hinic-fix-a-bug-of-rss-configuration.patch |  36 ++
 ...hinic-fix-a-bug-of-setting-hw_ioctxt.patch |  63 +++
 queue-5.5/hinic-fix-a-irq-affinity-bug.patch  |  58 +++
 ...l_list-race-for-setup_iopoll-setup_s.patch | 161 ++++++++
 ...p-link-work-on-submit-reference-drop.patch |  58 +++
 ...ix-data-races-at-struct-journal_head.patch | 110 ++++++
 ...nding_check-to-phony-in-a-correct-pl.patch |  46 +++
 .../kbuild-add-dtbs_check-to-phony.patch      |  41 ++
 ...it-run-kunit_tool-from-any-directory.patch |  64 ++++
 ...avoid-rcu-list-traversal-under-mutex.patch |  39 ++
 ...vdso-time-functionality-on-micromips.patch |  64 ++++
 ...ild-time-check-that-no-jalr-t9-calls.patch |  67 ++++
 ...-vdso-fix-jalr-t9-crash-in-vdso-code.patch |  64 ++++
 ...o-wrap-mexplicit-relocs-in-cc-option.patch |  57 +++
 ...onger-before-accessing-the-device-af.patch |  45 +++
 ...ar-id_mode_dis-in-ext_rgmii_oob_ctrl.patch |  39 ++
 ...mv88e6xxx-fix-masking-of-egress-port.patch |  44 +++
 ...8851-ml-fix-irq-handling-and-locking.patch | 102 +++++
 ...-more-error-handling-of-dma_map_sing.patch |  78 ++++
 ...c-fix-race-condition-causing-tx-hang.patch |  69 ++++
 ...-rx-buffer-descriptor-handling-on-gf.patch | 242 ++++++++++++
 ...dle-dma-halt-condition-caused-by-buf.patch | 176 +++++++++
 .../net-phy-mscc-fix-firmware-paths.patch     |  43 +++
 ...-allow-to-change-mux-id-if-mux-id-is.patch |  47 +++
 .../net-rmnet-fix-bridge-mode-bugs.patch      | 360 ++++++++++++++++++
 ...ll-pointer-dereference-in-rmnet_chan.patch |  88 +++++
 ...ll-pointer-dereference-in-rmnet_newl.patch |  82 ++++
 ...cket-forwarding-in-rmnet-bridge-mode.patch |  61 +++
 .../net-rmnet-fix-suspicious-rcu-usage.patch  | 163 ++++++++
 ...-rcu_read_lock-in-rmnet_force_unasso.patch |  95 +++++
 ...se-upper-lower-device-infrastructure.patch | 199 ++++++++++
 ...-restore-mtu-min-max-values-after-ra.patch |  51 +++
 ...nfiguration-if-kernel-is-running-in-.patch |  75 ++++
 ...ibfc-free-response-frame-from-gpn_id.patch |  37 ++
 ...sts-rseq-fix-out-of-tree-compilation.patch |  48 +++
 queue-5.5/series                              |  49 +++
 ...p-reconstruction-at-16-bit-rollover-.patch |  81 ++++
 ...ble-atomic-counter-increments-for-us.patch | 125 ++++++
 ...ee_netdev-before-rtnl_unlock-in-slip.patch |  38 ++
 ...er-printing-bug-in-print_synth_event.patch |  91 +++++
 50 files changed, 4134 insertions(+)
 create mode 100644 queue-5.5/acpi-watchdog-allow-disabling-wdat-at-boot.patch
 create mode 100644 queue-5.5/acpi-watchdog-set-default-timeout-in-probe.patch
 create mode 100644 queue-5.5/blk-mq-insert-passthrough-request-into-hctx-dispatch.patch
 create mode 100644 queue-5.5/cfg80211-check-reg_rule-for-null-in-handle_channel_c.patch
 create mode 100644 queue-5.5/drm-amdgpu-fix-memory-leak-during-tdr-test-v2.patch
 create mode 100644 queue-5.5/hid-apple-add-support-for-recent-firmware-on-magic-k.patch
 create mode 100644 queue-5.5/hid-hid-bigbenff-call-hid_hw_stop-in-case-of-error.patch
 create mode 100644 queue-5.5/hid-hid-bigbenff-fix-general-protection-fault-caused.patch
 create mode 100644 queue-5.5/hid-hid-bigbenff-fix-race-condition-for-scheduled-wo.patch
 create mode 100644 queue-5.5/hid-i2c-hid-add-trekstor-surfbook-e11b-to-descriptor.patch
 create mode 100644 queue-5.5/hinic-fix-a-bug-of-rss-configuration.patch
 create mode 100644 queue-5.5/hinic-fix-a-bug-of-setting-hw_ioctxt.patch
 create mode 100644 queue-5.5/hinic-fix-a-irq-affinity-bug.patch
 create mode 100644 queue-5.5/io_uring-fix-poll_list-race-for-setup_iopoll-setup_s.patch
 create mode 100644 queue-5.5/io_uring-pick-up-link-work-on-submit-reference-drop.patch
 create mode 100644 queue-5.5/jbd2-fix-data-races-at-struct-journal_head.patch
 create mode 100644 queue-5.5/kbuild-add-dt_binding_check-to-phony-in-a-correct-pl.patch
 create mode 100644 queue-5.5/kbuild-add-dtbs_check-to-phony.patch
 create mode 100644 queue-5.5/kunit-run-kunit_tool-from-any-directory.patch
 create mode 100644 queue-5.5/mac80211-rx-avoid-rcu-list-traversal-under-mutex.patch
 create mode 100644 queue-5.5/mips-disable-vdso-time-functionality-on-micromips.patch
 create mode 100644 queue-5.5/mips-vdso-add-build-time-check-that-no-jalr-t9-calls.patch
 create mode 100644 queue-5.5/mips-vdso-fix-jalr-t9-crash-in-vdso-code.patch
 create mode 100644 queue-5.5/mips-vdso-wrap-mexplicit-relocs-in-cc-option.patch
 create mode 100644 queue-5.5/mlxsw-pci-wait-longer-before-accessing-the-device-af.patch
 create mode 100644 queue-5.5/net-bcmgenet-clear-id_mode_dis-in-ext_rgmii_oob_ctrl.patch
 create mode 100644 queue-5.5/net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch
 create mode 100644 queue-5.5/net-ks8851-ml-fix-irq-handling-and-locking.patch
 create mode 100644 queue-5.5/net-ll_temac-add-more-error-handling-of-dma_map_sing.patch
 create mode 100644 queue-5.5/net-ll_temac-fix-race-condition-causing-tx-hang.patch
 create mode 100644 queue-5.5/net-ll_temac-fix-rx-buffer-descriptor-handling-on-gf.patch
 create mode 100644 queue-5.5/net-ll_temac-handle-dma-halt-condition-caused-by-buf.patch
 create mode 100644 queue-5.5/net-phy-mscc-fix-firmware-paths.patch
 create mode 100644 queue-5.5/net-rmnet-do-not-allow-to-change-mux-id-if-mux-id-is.patch
 create mode 100644 queue-5.5/net-rmnet-fix-bridge-mode-bugs.patch
 create mode 100644 queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_chan.patch
 create mode 100644 queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_newl.patch
 create mode 100644 queue-5.5/net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch
 create mode 100644 queue-5.5/net-rmnet-fix-suspicious-rcu-usage.patch
 create mode 100644 queue-5.5/net-rmnet-remove-rcu_read_lock-in-rmnet_force_unasso.patch
 create mode 100644 queue-5.5/net-rmnet-use-upper-lower-device-infrastructure.patch
 create mode 100644 queue-5.5/net-usb-qmi_wwan-restore-mtu-min-max-values-after-ra.patch
 create mode 100644 queue-5.5/riscv-set-pmp-configuration-if-kernel-is-running-in-.patch
 create mode 100644 queue-5.5/scsi-libfc-free-response-frame-from-gpn_id.patch
 create mode 100644 queue-5.5/selftests-rseq-fix-out-of-tree-compilation.patch
 create mode 100644 queue-5.5/sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch
 create mode 100644 queue-5.5/signal-avoid-double-atomic-counter-increments-for-us.patch
 create mode 100644 queue-5.5/slip-not-call-free_netdev-before-rtnl_unlock-in-slip.patch
 create mode 100644 queue-5.5/tracing-fix-number-printing-bug-in-print_synth_event.patch

diff --git
a/queue-5.5/acpi-watchdog-allow-disabling-wdat-at-boot.patch b/queue-5.5/acpi-watchdog-allow-disabling-wdat-at-boot.patch new file mode 100644 index 00000000000..7de8b981e5d --- /dev/null +++ b/queue-5.5/acpi-watchdog-allow-disabling-wdat-at-boot.patch @@ -0,0 +1,74 @@ +From 0e00f8b4606585db69c6a942a3dde4f333edd8ab Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 Feb 2020 16:58:45 +0100 +Subject: ACPI: watchdog: Allow disabling WDAT at boot + +From: Jean Delvare + +[ Upstream commit 3f9e12e0df012c4a9a7fd7eb0d3ae69b459d6b2c ] + +In case the WDAT interface is broken, give the user an option to +ignore it to let a native driver bind to the watchdog device instead. + +Signed-off-by: Jean Delvare +Acked-by: Mika Westerberg +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + Documentation/admin-guide/kernel-parameters.txt | 4 ++++ + drivers/acpi/acpi_watchdog.c | 12 +++++++++++- + 2 files changed, 15 insertions(+), 1 deletion(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index ade4e6ec23e03..727a03fb26c99 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -136,6 +136,10 @@ + dynamic table installation which will install SSDT + tables to /sys/firmware/acpi/tables/dynamic. + ++ acpi_no_watchdog [HW,ACPI,WDT] ++ Ignore the ACPI-based watchdog interface (WDAT) and let ++ a native driver control the watchdog device instead. ++ + acpi_rsdp= [ACPI,EFI,KEXEC] + Pass the RSDP address to the kernel, mostly used + on machines running EFI runtime service to boot the +diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c +index d827a4a3e9460..6e9ec6e3fe47d 100644 +--- a/drivers/acpi/acpi_watchdog.c ++++ b/drivers/acpi/acpi_watchdog.c +@@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat) + } + #endif + ++static bool acpi_no_watchdog; ++ + static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) + { + const struct acpi_table_wdat *wdat = NULL; + acpi_status status; + +- if (acpi_disabled) ++ if (acpi_disabled || acpi_no_watchdog) + return NULL; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, +@@ -88,6 +90,14 @@ bool acpi_has_watchdog(void) + } + EXPORT_SYMBOL_GPL(acpi_has_watchdog); + ++/* ACPI watchdog can be disabled on boot command line */ ++static int __init disable_acpi_watchdog(char *str) ++{ ++ acpi_no_watchdog = true; ++ return 1; ++} ++__setup("acpi_no_watchdog", disable_acpi_watchdog); ++ + void __init acpi_watchdog_init(void) + { + const struct acpi_wdat_entry *entries; +-- +2.20.1 + diff --git a/queue-5.5/acpi-watchdog-set-default-timeout-in-probe.patch b/queue-5.5/acpi-watchdog-set-default-timeout-in-probe.patch new file mode 100644 index 00000000000..d9be0e4f3f3 --- /dev/null +++ b/queue-5.5/acpi-watchdog-set-default-timeout-in-probe.patch @@ -0,0 +1,68 @@ +From cab26bf0dc115fae9296ccd65432765e1efdae96 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 Feb 2020 17:59:41 +0300 +Subject: ACPI: watchdog: Set default timeout in probe + +From: Mika Westerberg + +[ Upstream commit cabe17d0173ab04bd3f87b8199ae75f43f1ea473 ] + +If the BIOS default timeout for the watchdog is too small userspace may +not have enough time to configure new timeout after opening the device +before the system is already reset. 
For this reason program default +timeout of 30 seconds in the driver probe and allow userspace to change +this from command line or through module parameter (wdat_wdt.timeout). + +Reported-by: Jean Delvare +Signed-off-by: Mika Westerberg +Reviewed-by: Jean Delvare +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Sasha Levin +--- + drivers/watchdog/wdat_wdt.c | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c +index e1b1fcfc02af8..3065dd670a182 100644 +--- a/drivers/watchdog/wdat_wdt.c ++++ b/drivers/watchdog/wdat_wdt.c +@@ -54,6 +54,13 @@ module_param(nowayout, bool, 0); + MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + ++#define WDAT_DEFAULT_TIMEOUT 30 ++ ++static int timeout = WDAT_DEFAULT_TIMEOUT; ++module_param(timeout, int, 0); ++MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default=" ++ __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")"); ++ + static int wdat_wdt_read(struct wdat_wdt *wdat, + const struct wdat_instruction *instr, u32 *value) + { +@@ -438,6 +445,22 @@ static int wdat_wdt_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, wdat); + ++ /* ++ * Set initial timeout so that userspace has time to configure the ++ * watchdog properly after it has opened the device. In some cases ++ * the BIOS default is too short and causes immediate reboot. ++ */ ++ if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms || ++ timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) { ++ dev_warn(dev, "Invalid timeout %d given, using %d\n", ++ timeout, WDAT_DEFAULT_TIMEOUT); ++ timeout = WDAT_DEFAULT_TIMEOUT; ++ } ++ ++ ret = wdat_wdt_set_timeout(&wdat->wdd, timeout); ++ if (ret) ++ return ret; ++ + watchdog_set_nowayout(&wdat->wdd, nowayout); + return devm_watchdog_register_device(dev, &wdat->wdd); + } +-- +2.20.1 + diff --git a/queue-5.5/blk-mq-insert-passthrough-request-into-hctx-dispatch.patch b/queue-5.5/blk-mq-insert-passthrough-request-into-hctx-dispatch.patch new file mode 100644 index 00000000000..613926b96ce --- /dev/null +++ b/queue-5.5/blk-mq-insert-passthrough-request-into-hctx-dispatch.patch @@ -0,0 +1,181 @@ +From ce401416e57670279cb4c30863ce08e6c9075396 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Feb 2020 09:04:32 +0800 +Subject: blk-mq: insert passthrough request into hctx->dispatch directly + +From: Ming Lei + +[ Upstream commit 01e99aeca3979600302913cef3f89076786f32c8 ] + +For some reason, device may be in one situation which can't handle +FS request, so STS_RESOURCE is always returned and the FS request +will be added to hctx->dispatch. However passthrough request may +be required at that time for fixing the problem. If passthrough +request is added to scheduler queue, there isn't any chance for +blk-mq to dispatch it given we prioritize requests in hctx->dispatch. +Then the FS IO request may never be completed, and IO hang is caused. + +So passthrough request has to be added to hctx->dispatch directly +for fixing the IO hang. + +Fix this issue by inserting passthrough request into hctx->dispatch +directly together withing adding FS request to the tail of +hctx->dispatch in blk_mq_dispatch_rq_list(). Actually we add FS request +to tail of hctx->dispatch at default, see blk_mq_request_bypass_insert(). + +Then it becomes consistent with original legacy IO request +path, in which passthrough request is always added to q->queue_head. + +Cc: Dongli Zhang +Cc: Christoph Hellwig +Cc: Ewan D. 
Milne +Signed-off-by: Ming Lei +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/blk-flush.c | 2 +- + block/blk-mq-sched.c | 22 +++++++++++++++------- + block/blk-mq.c | 18 +++++++++++------- + block/blk-mq.h | 3 ++- + 4 files changed, 29 insertions(+), 16 deletions(-) + +diff --git a/block/blk-flush.c b/block/blk-flush.c +index 3f977c517960e..5cc775bdb06ac 100644 +--- a/block/blk-flush.c ++++ b/block/blk-flush.c +@@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq) + */ + if ((policy & REQ_FSEQ_DATA) && + !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { +- blk_mq_request_bypass_insert(rq, false); ++ blk_mq_request_bypass_insert(rq, false, false); + return; + } + +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c +index ca22afd47b3dc..856356b1619e8 100644 +--- a/block/blk-mq-sched.c ++++ b/block/blk-mq-sched.c +@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, + bool has_sched, + struct request *rq) + { +- /* dispatch flush rq directly */ +- if (rq->rq_flags & RQF_FLUSH_SEQ) { +- spin_lock(&hctx->lock); +- list_add(&rq->queuelist, &hctx->dispatch); +- spin_unlock(&hctx->lock); ++ /* ++ * dispatch flush and passthrough rq directly ++ * ++ * passthrough request has to be added to hctx->dispatch directly. ++ * For some reason, device may be in one situation which can't ++ * handle FS request, so STS_RESOURCE is always returned and the ++ * FS request will be added to hctx->dispatch. However passthrough ++ * request may be required at that time for fixing the problem. If ++ * passthrough request is added to scheduler queue, there isn't any ++ * chance to dispatch it given we prioritize requests in hctx->dispatch. ++ */ ++ if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) + return true; +- } + + if (has_sched) + rq->rq_flags |= RQF_SORTED; +@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head, + + WARN_ON(e && (rq->tag != -1)); + +- if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) ++ if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { ++ blk_mq_request_bypass_insert(rq, at_head, false); + goto run; ++ } + + if (e && e->type->ops.insert_requests) { + LIST_HEAD(list); +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 323c9cb28066b..329df7986bf60 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -727,7 +727,7 @@ static void blk_mq_requeue_work(struct work_struct *work) + * merge. + */ + if (rq->rq_flags & RQF_DONTPREP) +- blk_mq_request_bypass_insert(rq, false); ++ blk_mq_request_bypass_insert(rq, false, false); + else + blk_mq_sched_insert_request(rq, true, false, false); + } +@@ -1278,7 +1278,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, + q->mq_ops->commit_rqs(hctx); + + spin_lock(&hctx->lock); +- list_splice_init(list, &hctx->dispatch); ++ list_splice_tail_init(list, &hctx->dispatch); + spin_unlock(&hctx->lock); + + /* +@@ -1629,12 +1629,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, + * Should only be used carefully, when the caller knows we want to + * bypass a potential IO scheduler on the target device. 
+ */ +-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) ++void blk_mq_request_bypass_insert(struct request *rq, bool at_head, ++ bool run_queue) + { + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; + + spin_lock(&hctx->lock); +- list_add_tail(&rq->queuelist, &hctx->dispatch); ++ if (at_head) ++ list_add(&rq->queuelist, &hctx->dispatch); ++ else ++ list_add_tail(&rq->queuelist, &hctx->dispatch); + spin_unlock(&hctx->lock); + + if (run_queue) +@@ -1824,7 +1828,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, + if (bypass_insert) + return BLK_STS_RESOURCE; + +- blk_mq_request_bypass_insert(rq, run_queue); ++ blk_mq_request_bypass_insert(rq, false, run_queue); + return BLK_STS_OK; + } + +@@ -1840,7 +1844,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, + + ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) +- blk_mq_request_bypass_insert(rq, true); ++ blk_mq_request_bypass_insert(rq, false, true); + else if (ret != BLK_STS_OK) + blk_mq_end_request(rq, ret); + +@@ -1874,7 +1878,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, + if (ret != BLK_STS_OK) { + if (ret == BLK_STS_RESOURCE || + ret == BLK_STS_DEV_RESOURCE) { +- blk_mq_request_bypass_insert(rq, ++ blk_mq_request_bypass_insert(rq, false, + list_empty(list)); + break; + } +diff --git a/block/blk-mq.h b/block/blk-mq.h +index eaaca8fc1c287..c0fa34378eb2f 100644 +--- a/block/blk-mq.h ++++ b/block/blk-mq.h +@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, + */ + void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, + bool at_head); +-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); ++void blk_mq_request_bypass_insert(struct request *rq, bool at_head, ++ bool run_queue); + void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, + struct list_head *list); + +-- +2.20.1 + diff --git a/queue-5.5/cfg80211-check-reg_rule-for-null-in-handle_channel_c.patch b/queue-5.5/cfg80211-check-reg_rule-for-null-in-handle_channel_c.patch new file mode 100644 index 00000000000..f295d7e4ca4 --- /dev/null +++ b/queue-5.5/cfg80211-check-reg_rule-for-null-in-handle_channel_c.patch @@ -0,0 +1,37 @@ +From fbc9e57981df2139f11914205030ecbd3aa7ec08 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 10:44:50 +0100 +Subject: cfg80211: check reg_rule for NULL in handle_channel_custom() + +From: Johannes Berg + +[ Upstream commit a7ee7d44b57c9ae174088e53a668852b7f4f452d ] + +We may end up with a NULL reg_rule after the loop in +handle_channel_custom() if the bandwidth didn't fit, +check if this is the case and bail out if so. 
+ +Signed-off-by: Johannes Berg +Link: https://lore.kernel.org/r/20200221104449.3b558a50201c.I4ad3725c4dacaefd2d18d3cc65ba6d18acd5dbfe@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/wireless/reg.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index fff9a74891fc4..1a8218f1bbe07 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -2276,7 +2276,7 @@ static void handle_channel_custom(struct wiphy *wiphy, + break; + } + +- if (IS_ERR(reg_rule)) { ++ if (IS_ERR_OR_NULL(reg_rule)) { + pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n", + chan->center_freq); + if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { +-- +2.20.1 + diff --git a/queue-5.5/drm-amdgpu-fix-memory-leak-during-tdr-test-v2.patch b/queue-5.5/drm-amdgpu-fix-memory-leak-during-tdr-test-v2.patch new file mode 100644 index 00000000000..72b8a9b204c --- /dev/null +++ b/queue-5.5/drm-amdgpu-fix-memory-leak-during-tdr-test-v2.patch @@ -0,0 +1,43 @@ +From 6a1ef2dab877553e52ecb1fef6332965eff4b6e7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 8 Feb 2020 19:01:21 +0800 +Subject: drm/amdgpu: fix memory leak during TDR test(v2) + +From: Monk Liu + +[ Upstream commit 4829f89855f1d3a3d8014e74cceab51b421503db ] + +fix system memory leak + +v2: +fix coding style + +Signed-off-by: Monk Liu +Reviewed-by: Hawking Zhang +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +index 8b13d18c6414c..e4149e6b68b39 100644 +--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c ++++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +@@ -948,8 +948,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) + struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; + int ret = 0; + +- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), ++ if (!smu->smu_table.max_sustainable_clocks) ++ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), + GFP_KERNEL); ++ else ++ max_sustainable_clocks = smu->smu_table.max_sustainable_clocks; ++ + smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks; + + max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; +-- +2.20.1 + diff --git a/queue-5.5/hid-apple-add-support-for-recent-firmware-on-magic-k.patch b/queue-5.5/hid-apple-add-support-for-recent-firmware-on-magic-k.patch new file mode 100644 index 00000000000..1975de88816 --- /dev/null +++ b/queue-5.5/hid-apple-add-support-for-recent-firmware-on-magic-k.patch @@ -0,0 +1,37 @@ +From 8fbc3543778879bb48dd24945b6bd03e07fdabd4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 29 Jan 2020 17:26:31 +1100 +Subject: HID: apple: Add support for recent firmware on Magic Keyboards + +From: Mansour Behabadi + +[ Upstream commit e433be929e63265b7412478eb7ff271467aee2d7 ] + +Magic Keyboards with more recent firmware (0x0100) report Fn key differently. +Without this patch, Fn key may not behave as expected and may not be +configurable via hid_apple fnmode module parameter. 
+ +Signed-off-by: Mansour Behabadi +Signed-off-by: Jiri Kosina +Signed-off-by: Sasha Levin +--- + drivers/hid/hid-apple.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 6ac8becc2372e..d732d1d10cafb 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, + unsigned long **bit, int *max) + { + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || +- usage->hid == (HID_UP_MSVENDOR | 0x0003)) { ++ usage->hid == (HID_UP_MSVENDOR | 0x0003) || ++ usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { + /* The fn key on Apple USB keyboards */ + set_bit(EV_REP, hi->input->evbit); + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); +-- +2.20.1 + diff --git a/queue-5.5/hid-hid-bigbenff-call-hid_hw_stop-in-case-of-error.patch b/queue-5.5/hid-hid-bigbenff-call-hid_hw_stop-in-case-of-error.patch new file mode 100644 index 00000000000..ea34783a71c --- /dev/null +++ b/queue-5.5/hid-hid-bigbenff-call-hid_hw_stop-in-case-of-error.patch @@ -0,0 +1,77 @@ +From 612db9f706200567feaa1e4284681e9b8d1bdbe9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 18 Feb 2020 12:38:34 +0100 +Subject: HID: hid-bigbenff: call hid_hw_stop() in case of error + +From: Hanno Zulla + +[ Upstream commit 976a54d0f4202cb412a3b1fc7f117e1d97db35f3 ] + +It's required to call hid_hw_stop() once hid_hw_start() was called +previously, so error cases need to handle this. Also, hid_hw_close() is +not necessary during removal. + +Signed-off-by: Hanno Zulla +Signed-off-by: Benjamin Tissoires +Signed-off-by: Sasha Levin +--- + drivers/hid/hid-bigbenff.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c +index f7e85bacb6889..f8c552b64a899 100644 +--- a/drivers/hid/hid-bigbenff.c ++++ b/drivers/hid/hid-bigbenff.c +@@ -305,7 +305,6 @@ static void bigben_remove(struct hid_device *hid) + struct bigben_device *bigben = hid_get_drvdata(hid); + + cancel_work_sync(&bigben->worker); +- hid_hw_close(hid); + hid_hw_stop(hid); + } + +@@ -350,7 +349,7 @@ static int bigben_probe(struct hid_device *hid, + error = input_ff_create_memless(hidinput->input, NULL, + hid_bigben_play_effect); + if (error) +- return error; ++ goto error_hw_stop; + + name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1; + +@@ -360,8 +359,10 @@ static int bigben_probe(struct hid_device *hid, + sizeof(struct led_classdev) + name_sz, + GFP_KERNEL + ); +- if (!led) +- return -ENOMEM; ++ if (!led) { ++ error = -ENOMEM; ++ goto error_hw_stop; ++ } + name = (void *)(&led[1]); + snprintf(name, name_sz, + "%s:red:bigben%d", +@@ -375,7 +376,7 @@ static int bigben_probe(struct hid_device *hid, + bigben->leds[n] = led; + error = devm_led_classdev_register(&hid->dev, led); + if (error) +- return error; ++ goto error_hw_stop; + } + + /* initial state: LED1 is on, no rumble effect */ +@@ -389,6 +390,10 @@ static int bigben_probe(struct hid_device *hid, + hid_info(hid, "LED and force feedback support for BigBen gamepad\n"); + + return 0; ++ ++error_hw_stop: ++ hid_hw_stop(hid); ++ return error; + } + + static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc, +-- +2.20.1 + diff --git a/queue-5.5/hid-hid-bigbenff-fix-general-protection-fault-caused.patch b/queue-5.5/hid-hid-bigbenff-fix-general-protection-fault-caused.patch new file mode 100644 index 00000000000..b932152efbc --- /dev/null +++ 
b/queue-5.5/hid-hid-bigbenff-fix-general-protection-fault-caused.patch @@ -0,0 +1,56 @@ +From 4246c0279485de3165ed780df881d3b889daa520 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 18 Feb 2020 12:37:47 +0100 +Subject: HID: hid-bigbenff: fix general protection fault caused by double + kfree + +From: Hanno Zulla + +[ Upstream commit 789a2c250340666220fa74bc6c8f58497e3863b3 ] + +The struct *bigben was allocated via devm_kzalloc() and then used as a +parameter in input_ff_create_memless(). This caused a double kfree +during removal of the device, since both the managed resource API and +ml_ff_destroy() in drivers/input/ff-memless.c would call kfree() on it. + +Signed-off-by: Hanno Zulla +Signed-off-by: Benjamin Tissoires +Signed-off-by: Sasha Levin +--- + drivers/hid/hid-bigbenff.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c +index 3f6abd190df43..f7e85bacb6889 100644 +--- a/drivers/hid/hid-bigbenff.c ++++ b/drivers/hid/hid-bigbenff.c +@@ -220,10 +220,16 @@ static void bigben_worker(struct work_struct *work) + static int hid_bigben_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) + { +- struct bigben_device *bigben = data; ++ struct hid_device *hid = input_get_drvdata(dev); ++ struct bigben_device *bigben = hid_get_drvdata(hid); + u8 right_motor_on; + u8 left_motor_force; + ++ if (!bigben) { ++ hid_err(hid, "no device data\n"); ++ return 0; ++ } ++ + if (effect->type != FF_RUMBLE) + return 0; + +@@ -341,7 +347,7 @@ static int bigben_probe(struct hid_device *hid, + + INIT_WORK(&bigben->worker, bigben_worker); + +- error = input_ff_create_memless(hidinput->input, bigben, ++ error = input_ff_create_memless(hidinput->input, NULL, + hid_bigben_play_effect); + if (error) + return error; +-- +2.20.1 + diff --git a/queue-5.5/hid-hid-bigbenff-fix-race-condition-for-scheduled-wo.patch b/queue-5.5/hid-hid-bigbenff-fix-race-condition-for-scheduled-wo.patch new file mode 100644 index 00000000000..14dca2b2531 --- /dev/null +++ b/queue-5.5/hid-hid-bigbenff-fix-race-condition-for-scheduled-wo.patch @@ -0,0 +1,62 @@ +From 22aae3bed1b1102c0c60bf89375d0bcac39efc44 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 18 Feb 2020 12:39:31 +0100 +Subject: HID: hid-bigbenff: fix race condition for scheduled work during + removal + +From: Hanno Zulla + +[ Upstream commit 4eb1b01de5b9d8596d6c103efcf1a15cfc1bedf7 ] + +It's possible that there is scheduled work left while the device is +already being removed, which can cause a kernel crash. Adding a flag +will avoid this. + +Signed-off-by: Hanno Zulla +Signed-off-by: Benjamin Tissoires +Signed-off-by: Sasha Levin +--- + drivers/hid/hid-bigbenff.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c +index f8c552b64a899..db6da21ade063 100644 +--- a/drivers/hid/hid-bigbenff.c ++++ b/drivers/hid/hid-bigbenff.c +@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = { + struct bigben_device { + struct hid_device *hid; + struct hid_report *report; ++ bool removed; + u8 led_state; /* LED1 = 1 .. 
LED4 = 8 */ + u8 right_motor_on; /* right motor off/on 0/1 */ + u8 left_motor_force; /* left motor force 0-255 */ +@@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work) + struct bigben_device, worker); + struct hid_field *report_field = bigben->report->field[0]; + ++ if (bigben->removed) ++ return; ++ + if (bigben->work_led) { + bigben->work_led = false; + report_field->value[0] = 0x01; /* 1 = led message */ +@@ -304,6 +308,7 @@ static void bigben_remove(struct hid_device *hid) + { + struct bigben_device *bigben = hid_get_drvdata(hid); + ++ bigben->removed = true; + cancel_work_sync(&bigben->worker); + hid_hw_stop(hid); + } +@@ -324,6 +329,7 @@ static int bigben_probe(struct hid_device *hid, + return -ENOMEM; + hid_set_drvdata(hid, bigben); + bigben->hid = hid; ++ bigben->removed = false; + + error = hid_parse(hid); + if (error) { +-- +2.20.1 + diff --git a/queue-5.5/hid-i2c-hid-add-trekstor-surfbook-e11b-to-descriptor.patch b/queue-5.5/hid-i2c-hid-add-trekstor-surfbook-e11b-to-descriptor.patch new file mode 100644 index 00000000000..a348f33dd21 --- /dev/null +++ b/queue-5.5/hid-i2c-hid-add-trekstor-surfbook-e11b-to-descriptor.patch @@ -0,0 +1,43 @@ +From c4c00f59b7510345ea9faccf3f8fcde3f5a9bbc2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 14 Feb 2020 14:53:07 +0800 +Subject: HID: i2c-hid: add Trekstor Surfbook E11B to descriptor override + +From: Kai-Heng Feng + +[ Upstream commit be0aba826c4a6ba5929def1962a90d6127871969 ] + +The Surfbook E11B uses the SIPODEV SP1064 touchpad, which does not supply +descriptors, so it has to be added to the override list. + +BugLink: https://bugs.launchpad.net/bugs/1858299 +Signed-off-by: Kai-Heng Feng +Reviewed-by: Hans de Goede +Signed-off-by: Benjamin Tissoires +Signed-off-by: Sasha Levin +--- + drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +index d31ea82b84c17..a66f08041a1aa 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +@@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ .ident = "Trekstor SURFBOOK E11B", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { + .ident = "Direkt-Tek DTLAPY116-2", + .matches = { +-- +2.20.1 + diff --git a/queue-5.5/hinic-fix-a-bug-of-rss-configuration.patch b/queue-5.5/hinic-fix-a-bug-of-rss-configuration.patch new file mode 100644 index 00000000000..a9ca5c7cb4b --- /dev/null +++ b/queue-5.5/hinic-fix-a-bug-of-rss-configuration.patch @@ -0,0 +1,36 @@ +From 47fea2edd9ff5453d801abebbc96d8f509b05f3b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 06:34:44 +0000 +Subject: hinic: fix a bug of rss configuration + +From: Luo bin + +[ Upstream commit 386d4716fd91869e07c731657f2cde5a33086516 ] + +should use real receive queue number to configure hw rss +indirect table rather than maximal queue number + +Signed-off-by: Luo bin +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/huawei/hinic/hinic_main.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c +index 2411ad270c98e..42d00b049c6e8 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c +@@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev) + if (!num_cpus) + num_cpus = num_online_cpus(); + +- nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus); ++ nic_dev->num_qps = hinic_hwdev_num_qps(hwdev); ++ nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus); + + nic_dev->rss_limit = nic_dev->num_qps; + nic_dev->num_rss = nic_dev->num_qps; +-- +2.20.1 + diff --git a/queue-5.5/hinic-fix-a-bug-of-setting-hw_ioctxt.patch b/queue-5.5/hinic-fix-a-bug-of-setting-hw_ioctxt.patch new file mode 100644 index 00000000000..f59e1f8a097 --- /dev/null +++ b/queue-5.5/hinic-fix-a-bug-of-setting-hw_ioctxt.patch @@ -0,0 +1,63 @@ +From 3e8f1a53c630ec7fcf9b169fad3fd1cba729f968 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 06:34:43 +0000 +Subject: hinic: fix a bug of setting hw_ioctxt + +From: Luo bin + +[ Upstream commit d2ed69ce9ed3477e2a9527e6b89fe4689d99510e ] + +a reserved field is used to signify prime physical function index +in the latest firmware version, so we must assign a value to it +correctly + +Signed-off-by: Luo bin +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1 + + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 2 +- + drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 1 + + 3 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +index 6f2cf569a283c..79b3d53f2fbfa 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +@@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, + } + + hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); ++ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); + + hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; + hw_ioctxt.cmdq_depth = 0; +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +index b069045de416c..66fd2340d4479 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +@@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt { + + u8 lro_en; + u8 rsvd3; ++ u8 ppf_idx; + u8 rsvd4; +- u8 rsvd5; + + u16 rq_depth; + u16 rx_buf_sz_idx; +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +index 517794509eb29..c7bb9ceca72ca 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +@@ -137,6 +137,7 @@ + #define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) + #define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) + #define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) ++#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) + + #define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) + #define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) +-- +2.20.1 + diff --git a/queue-5.5/hinic-fix-a-irq-affinity-bug.patch b/queue-5.5/hinic-fix-a-irq-affinity-bug.patch new file 
mode 100644 index 00000000000..f409f32d1ce --- /dev/null +++ b/queue-5.5/hinic-fix-a-irq-affinity-bug.patch @@ -0,0 +1,58 @@ +From 63ce242114cf835b4888fba47ce74dd26137c764 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 06:34:42 +0000 +Subject: hinic: fix a irq affinity bug + +From: Luo bin + +[ Upstream commit 0bff777bd0cba73ad4cd0145696ad284d7e6a99f ] + +can not use a local variable as an input parameter of +irq_set_affinity_hint + +Signed-off-by: Luo bin +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 1 + + drivers/net/ethernet/huawei/hinic/hinic_rx.c | 5 ++--- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +index f4a339b10b10b..79091e1314181 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +@@ -94,6 +94,7 @@ struct hinic_rq { + + struct hinic_wq *wq; + ++ struct cpumask affinity_mask; + u32 irq; + u16 msix_entry; + +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c +index 56ea6d692f1c3..2695ad69fca60 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c +@@ -475,7 +475,6 @@ static int rx_request_irq(struct hinic_rxq *rxq) + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_rq *rq = rxq->rq; + struct hinic_qp *qp; +- struct cpumask mask; + int err; + + rx_add_napi(rxq); +@@ -492,8 +491,8 @@ static int rx_request_irq(struct hinic_rxq *rxq) + } + + qp = container_of(rq, struct hinic_qp, rq); +- cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); +- return irq_set_affinity_hint(rq->irq, &mask); ++ cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); ++ return irq_set_affinity_hint(rq->irq, &rq->affinity_mask); + } + + static void rx_free_irq(struct hinic_rxq *rxq) +-- +2.20.1 + diff --git a/queue-5.5/io_uring-fix-poll_list-race-for-setup_iopoll-setup_s.patch b/queue-5.5/io_uring-fix-poll_list-race-for-setup_iopoll-setup_s.patch new file mode 100644 index 00000000000..e804c17dba8 --- /dev/null +++ b/queue-5.5/io_uring-fix-poll_list-race-for-setup_iopoll-setup_s.patch @@ -0,0 +1,161 @@ +From ecd25e9b665f96e9d32f8fc006e61dbfebb49b08 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Feb 2020 22:12:08 +0800 +Subject: io_uring: fix poll_list race for SETUP_IOPOLL|SETUP_SQPOLL + +From: Xiaoguang Wang + +[ Upstream commit bdcd3eab2a9ae0ac93f27275b6895dd95e5bf360 ] + +After making ext4 support iopoll method: + let ext4_file_operations's iopoll method be iomap_dio_iopoll(), +we found fio can easily hang in fio_ioring_getevents() with below fio +job: + rm -f testfile; sync; + sudo fio -name=fiotest -filename=testfile -iodepth=128 -thread +-rw=write -ioengine=io_uring -hipri=1 -sqthread_poll=1 -direct=1 +-bs=4k -size=10G -numjobs=8 -runtime=2000 -group_reporting +with IORING_SETUP_SQPOLL and IORING_SETUP_IOPOLL enabled. + +There are two issues that results in this hang, one reason is that +when IORING_SETUP_SQPOLL and IORING_SETUP_IOPOLL are enabled, fio +does not use io_uring_enter to get completed events, it relies on +kernel io_sq_thread to poll for completed events. 
+ +Another reason is that there is a race: when io_submit_sqes() in +io_sq_thread() submits a batch of sqes, variable 'inflight' will +record the number of submitted reqs, then io_sq_thread will poll for +reqs which have been added to poll_list. But note, if some previous +reqs have been punted to io worker, these reqs will won't be in +poll_list timely. io_sq_thread() will only poll for a part of previous +submitted reqs, and then find poll_list is empty, reset variable +'inflight' to be zero. If app just waits these deferred reqs and does +not wake up io_sq_thread again, then hang happens. + +For app that entirely relies on io_sq_thread to poll completed requests, +let io_iopoll_req_issued() wake up io_sq_thread properly when adding new +element to poll_list, and when io_sq_thread prepares to sleep, check +whether poll_list is empty again, if not empty, continue to poll. + +Signed-off-by: Xiaoguang Wang +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + fs/io_uring.c | 59 +++++++++++++++++++++++---------------------------- + 1 file changed, 27 insertions(+), 32 deletions(-) + +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 60a4832089982..c8f8cc2463986 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -1435,6 +1435,10 @@ static void io_iopoll_req_issued(struct io_kiocb *req) + list_add(&req->list, &ctx->poll_list); + else + list_add_tail(&req->list, &ctx->poll_list); ++ ++ if ((ctx->flags & IORING_SETUP_SQPOLL) && ++ wq_has_sleeper(&ctx->sqo_wait)) ++ wake_up(&ctx->sqo_wait); + } + + static void io_file_put(struct io_submit_state *state) +@@ -3857,9 +3861,8 @@ static int io_sq_thread(void *data) + const struct cred *old_cred; + mm_segment_t old_fs; + DEFINE_WAIT(wait); +- unsigned inflight; + unsigned long timeout; +- int ret; ++ int ret = 0; + + complete(&ctx->completions[1]); + +@@ -3867,39 +3870,19 @@ static int io_sq_thread(void *data) + set_fs(USER_DS); + old_cred = override_creds(ctx->creds); + +- ret = timeout = inflight = 0; ++ timeout = jiffies + ctx->sq_thread_idle; + while (!kthread_should_park()) { + unsigned int to_submit; + +- if (inflight) { ++ if (!list_empty(&ctx->poll_list)) { + unsigned nr_events = 0; + +- if (ctx->flags & IORING_SETUP_IOPOLL) { +- /* +- * inflight is the count of the maximum possible +- * entries we submitted, but it can be smaller +- * if we dropped some of them. If we don't have +- * poll entries available, then we know that we +- * have nothing left to poll for. Reset the +- * inflight count to zero in that case. +- */ +- mutex_lock(&ctx->uring_lock); +- if (!list_empty(&ctx->poll_list)) +- io_iopoll_getevents(ctx, &nr_events, 0); +- else +- inflight = 0; +- mutex_unlock(&ctx->uring_lock); +- } else { +- /* +- * Normal IO, just pretend everything completed. +- * We don't have to poll completions for that. +- */ +- nr_events = inflight; +- } +- +- inflight -= nr_events; +- if (!inflight) ++ mutex_lock(&ctx->uring_lock); ++ if (!list_empty(&ctx->poll_list)) ++ io_iopoll_getevents(ctx, &nr_events, 0); ++ else + timeout = jiffies + ctx->sq_thread_idle; ++ mutex_unlock(&ctx->uring_lock); + } + + to_submit = io_sqring_entries(ctx); +@@ -3928,7 +3911,7 @@ static int io_sq_thread(void *data) + * more IO, we should wait for the application to + * reap events and wake us up. 
+ */ +- if (inflight || ++ if (!list_empty(&ctx->poll_list) || + (!time_after(jiffies, timeout) && ret != -EBUSY && + !percpu_ref_is_dying(&ctx->refs))) { + cond_resched(); +@@ -3938,6 +3921,19 @@ static int io_sq_thread(void *data) + prepare_to_wait(&ctx->sqo_wait, &wait, + TASK_INTERRUPTIBLE); + ++ /* ++ * While doing polled IO, before going to sleep, we need ++ * to check if there are new reqs added to poll_list, it ++ * is because reqs may have been punted to io worker and ++ * will be added to poll_list later, hence check the ++ * poll_list again. ++ */ ++ if ((ctx->flags & IORING_SETUP_IOPOLL) && ++ !list_empty_careful(&ctx->poll_list)) { ++ finish_wait(&ctx->sqo_wait, &wait); ++ continue; ++ } ++ + /* Tell userspace we may need a wakeup call */ + ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; + /* make sure to read SQ tail after writing flags */ +@@ -3966,8 +3962,7 @@ static int io_sq_thread(void *data) + mutex_lock(&ctx->uring_lock); + ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true); + mutex_unlock(&ctx->uring_lock); +- if (ret > 0) +- inflight += ret; ++ timeout = jiffies + ctx->sq_thread_idle; + } + + set_fs(old_fs); +-- +2.20.1 + diff --git a/queue-5.5/io_uring-pick-up-link-work-on-submit-reference-drop.patch b/queue-5.5/io_uring-pick-up-link-work-on-submit-reference-drop.patch new file mode 100644 index 00000000000..0265e6b174c --- /dev/null +++ b/queue-5.5/io_uring-pick-up-link-work-on-submit-reference-drop.patch @@ -0,0 +1,58 @@ +From 090087c4facf5b56532cb2e7f98cf0448bd7adaf Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Feb 2020 13:25:41 -0700 +Subject: io_uring: pick up link work on submit reference drop + +From: Jens Axboe + +[ Upstream commit 2a44f46781617c5040372b59da33553a02b1f46d ] + +If work completes inline, then we should pick up a dependent link item +in __io_queue_sqe() as well. If we don't do so, we're forced to go async +with that item, which is suboptimal. + +This also fixes an issue with io_put_req_find_next(), which always looks +up the next work item. That should only be done if we're dropping the +last reference to the request, to prevent multiple lookups of the same +work item. + +Outside of being a fix, this also enables a good cleanup series for 5.7, +where we never have to pass 'nxt' around or into the work handlers. 
+ +Reviewed-by: Pavel Begunkov +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + fs/io_uring.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/fs/io_uring.c b/fs/io_uring.c +index c8f8cc2463986..2547c6395d5e4 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -1099,10 +1099,10 @@ static void io_free_req(struct io_kiocb *req) + __attribute__((nonnull)) + static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr) + { +- io_req_find_next(req, nxtptr); +- +- if (refcount_dec_and_test(&req->refs)) ++ if (refcount_dec_and_test(&req->refs)) { ++ io_req_find_next(req, nxtptr); + __io_free_req(req); ++ } + } + + static void io_put_req(struct io_kiocb *req) +@@ -3569,7 +3569,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) + + err: + /* drop submission reference */ +- io_put_req(req); ++ io_put_req_find_next(req, &nxt); + + if (linked_timeout) { + if (!ret) +-- +2.20.1 + diff --git a/queue-5.5/jbd2-fix-data-races-at-struct-journal_head.patch b/queue-5.5/jbd2-fix-data-races-at-struct-journal_head.patch new file mode 100644 index 00000000000..2101b3279d4 --- /dev/null +++ b/queue-5.5/jbd2-fix-data-races-at-struct-journal_head.patch @@ -0,0 +1,110 @@ +From 4e3a0980e2b1d5708d602a38b717e10afc2c72cc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 23:31:11 -0500 +Subject: jbd2: fix data races at struct journal_head + +From: Qian Cai + +[ Upstream commit 6c5d911249290f41f7b50b43344a7520605b1acb ] + +journal_head::b_transaction and journal_head::b_next_transaction could +be accessed concurrently as noticed by KCSAN, + + LTP: starting fsync04 + /dev/zero: Can't open blockdev + EXT4-fs (loop0): mounting ext3 file system using the ext4 subsystem + EXT4-fs (loop0): mounted filesystem with ordered data mode. 
Opts: (null) + ================================================================== + BUG: KCSAN: data-race in __jbd2_journal_refile_buffer [jbd2] / jbd2_write_access_granted [jbd2] + + write to 0xffff99f9b1bd0e30 of 8 bytes by task 25721 on cpu 70: + __jbd2_journal_refile_buffer+0xdd/0x210 [jbd2] + __jbd2_journal_refile_buffer at fs/jbd2/transaction.c:2569 + jbd2_journal_commit_transaction+0x2d15/0x3f20 [jbd2] + (inlined by) jbd2_journal_commit_transaction at fs/jbd2/commit.c:1034 + kjournald2+0x13b/0x450 [jbd2] + kthread+0x1cd/0x1f0 + ret_from_fork+0x27/0x50 + + read to 0xffff99f9b1bd0e30 of 8 bytes by task 25724 on cpu 68: + jbd2_write_access_granted+0x1b2/0x250 [jbd2] + jbd2_write_access_granted at fs/jbd2/transaction.c:1155 + jbd2_journal_get_write_access+0x2c/0x60 [jbd2] + __ext4_journal_get_write_access+0x50/0x90 [ext4] + ext4_mb_mark_diskspace_used+0x158/0x620 [ext4] + ext4_mb_new_blocks+0x54f/0xca0 [ext4] + ext4_ind_map_blocks+0xc79/0x1b40 [ext4] + ext4_map_blocks+0x3b4/0x950 [ext4] + _ext4_get_block+0xfc/0x270 [ext4] + ext4_get_block+0x3b/0x50 [ext4] + __block_write_begin_int+0x22e/0xae0 + __block_write_begin+0x39/0x50 + ext4_write_begin+0x388/0xb50 [ext4] + generic_perform_write+0x15d/0x290 + ext4_buffered_write_iter+0x11f/0x210 [ext4] + ext4_file_write_iter+0xce/0x9e0 [ext4] + new_sync_write+0x29c/0x3b0 + __vfs_write+0x92/0xa0 + vfs_write+0x103/0x260 + ksys_write+0x9d/0x130 + __x64_sys_write+0x4c/0x60 + do_syscall_64+0x91/0xb05 + entry_SYSCALL_64_after_hwframe+0x49/0xbe + + 5 locks held by fsync04/25724: + #0: ffff99f9911093f8 (sb_writers#13){.+.+}, at: vfs_write+0x21c/0x260 + #1: ffff99f9db4c0348 (&sb->s_type->i_mutex_key#15){+.+.}, at: ext4_buffered_write_iter+0x65/0x210 [ext4] + #2: ffff99f5e7dfcf58 (jbd2_handle){++++}, at: start_this_handle+0x1c1/0x9d0 [jbd2] + #3: ffff99f9db4c0168 (&ei->i_data_sem){++++}, at: ext4_map_blocks+0x176/0x950 [ext4] + #4: ffffffff99086b40 (rcu_read_lock){....}, at: jbd2_write_access_granted+0x4e/0x250 [jbd2] + irq event stamp: 1407125 + hardirqs last enabled at (1407125): [] __find_get_block+0x107/0x790 + hardirqs last disabled at (1407124): [] __find_get_block+0x49/0x790 + softirqs last enabled at (1405528): [] __do_softirq+0x34c/0x57c + softirqs last disabled at (1405521): [] irq_exit+0xa2/0xc0 + + Reported by Kernel Concurrency Sanitizer on: + CPU: 68 PID: 25724 Comm: fsync04 Tainted: G L 5.6.0-rc2-next-20200221+ #7 + Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 + +The plain reads are outside of jh->b_state_lock critical section which result +in data races. Fix them by adding pairs of READ|WRITE_ONCE(). 
+ +Reviewed-by: Jan Kara +Signed-off-by: Qian Cai +Link: https://lore.kernel.org/r/20200222043111.2227-1-cai@lca.pw +Signed-off-by: Theodore Ts'o +Signed-off-by: Sasha Levin +--- + fs/jbd2/transaction.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index ab1078e85a58e..295e3cdb4461b 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1150,8 +1150,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh, + /* For undo access buffer must have data copied */ + if (undo && !jh->b_committed_data) + goto out; +- if (jh->b_transaction != handle->h_transaction && +- jh->b_next_transaction != handle->h_transaction) ++ if (READ_ONCE(jh->b_transaction) != handle->h_transaction && ++ READ_ONCE(jh->b_next_transaction) != handle->h_transaction) + goto out; + /* + * There are two reasons for the barrier here: +@@ -2569,8 +2569,8 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh) + * our jh reference and thus __jbd2_journal_file_buffer() must not + * take a new one. + */ +- jh->b_transaction = jh->b_next_transaction; +- jh->b_next_transaction = NULL; ++ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction); ++ WRITE_ONCE(jh->b_next_transaction, NULL); + if (buffer_freed(bh)) + jlist = BJ_Forget; + else if (jh->b_modified) +-- +2.20.1 + diff --git a/queue-5.5/kbuild-add-dt_binding_check-to-phony-in-a-correct-pl.patch b/queue-5.5/kbuild-add-dt_binding_check-to-phony-in-a-correct-pl.patch new file mode 100644 index 00000000000..4ae98ddd12b --- /dev/null +++ b/queue-5.5/kbuild-add-dt_binding_check-to-phony-in-a-correct-pl.patch @@ -0,0 +1,46 @@ +From 6a9d751478b268cdaa36c95a3fa7464a6b86ecf9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 23 Feb 2020 04:04:34 +0900 +Subject: kbuild: add dt_binding_check to PHONY in a correct place + +From: Masahiro Yamada + +[ Upstream commit c473a8d03ea8e03ca9d649b0b6d72fbcf6212c05 ] + +The dt_binding_check is added to PHONY, but it is invisible when +$(dtstree) is empty. So, it is not specified as phony for +ARCH=x86 etc. + +Add it to PHONY outside the ifneq ... endif block. 
+ +Signed-off-by: Masahiro Yamada +Acked-by: Rob Herring +Signed-off-by: Sasha Levin +--- + Makefile | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index 362f1e2ca63ff..dcc0053ed18ad 100644 +--- a/Makefile ++++ b/Makefile +@@ -1239,7 +1239,7 @@ ifneq ($(dtstree),) + %.dtb: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ + +-PHONY += dtbs dtbs_install dtbs_check dt_binding_check ++PHONY += dtbs dtbs_install dtbs_check + dtbs dtbs_check: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) + +@@ -1259,6 +1259,7 @@ PHONY += scripts_dtc + scripts_dtc: scripts_basic + $(Q)$(MAKE) $(build)=scripts/dtc + ++PHONY += dt_binding_check + dt_binding_check: scripts_dtc + $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings + +-- +2.20.1 + diff --git a/queue-5.5/kbuild-add-dtbs_check-to-phony.patch b/queue-5.5/kbuild-add-dtbs_check-to-phony.patch new file mode 100644 index 00000000000..d1cd420fa27 --- /dev/null +++ b/queue-5.5/kbuild-add-dtbs_check-to-phony.patch @@ -0,0 +1,41 @@ +From de9a1cec3527ba6cb7a566c4b08b91f8f9d8f454 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 23 Feb 2020 04:04:33 +0900 +Subject: kbuild: add dtbs_check to PHONY + +From: Masahiro Yamada + +[ Upstream commit 964a596db8db8c77c9903dd05655696696e6b3ad ] + +The dtbs_check should be a phony target, but currently it is not +specified so. + +'make dtbs_check' works even if a file named 'dtbs_check' exists +because it depends on another phony target, scripts_dtc, but we +should not rely on it. + +Add dtbs_check to PHONY. + +Signed-off-by: Masahiro Yamada +Acked-by: Rob Herring +Signed-off-by: Sasha Levin +--- + Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index 5d0fdaf900e9d..362f1e2ca63ff 100644 +--- a/Makefile ++++ b/Makefile +@@ -1239,7 +1239,7 @@ ifneq ($(dtstree),) + %.dtb: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ + +-PHONY += dtbs dtbs_install dt_binding_check ++PHONY += dtbs dtbs_install dtbs_check dt_binding_check + dtbs dtbs_check: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) + +-- +2.20.1 + diff --git a/queue-5.5/kunit-run-kunit_tool-from-any-directory.patch b/queue-5.5/kunit-run-kunit_tool-from-any-directory.patch new file mode 100644 index 00000000000..7c262801c3c --- /dev/null +++ b/queue-5.5/kunit-run-kunit_tool-from-any-directory.patch @@ -0,0 +1,64 @@ +From d9143224e7e187b284b31c7ac57f2c3d47257d41 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 18 Feb 2020 14:19:16 -0800 +Subject: kunit: run kunit_tool from any directory + +From: Heidi Fahim + +[ Upstream commit be886ba90cce2fb2f5a4dbcda8f3be3fd1b2f484 ] + +Implemented small fix so that the script changes work directories to the +root of the linux kernel source tree from which kunit.py is run. This +enables the user to run kunit from any working directory. Originally +considered using os.path.join but this is more error prone as we would +have to find all file path usages and modify them accordingly. Using +os.chdir ensures that the entire script is run within /linux. 
+ +Signed-off-by: Heidi Fahim +Reviewed-by: Brendan Higgins +Signed-off-by: Shuah Khan +Signed-off-by: Sasha Levin +--- + tools/testing/kunit/kunit.py | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py +index e59eb9e7f9236..180ad1e1b04f9 100755 +--- a/tools/testing/kunit/kunit.py ++++ b/tools/testing/kunit/kunit.py +@@ -24,6 +24,8 @@ KunitResult = namedtuple('KunitResult', ['status','result']) + + KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', 'build_dir', 'defconfig']) + ++KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0] ++ + class KunitStatus(Enum): + SUCCESS = auto() + CONFIG_FAILURE = auto() +@@ -35,6 +37,13 @@ def create_default_kunitconfig(): + shutil.copyfile('arch/um/configs/kunit_defconfig', + kunit_kernel.kunitconfig_path) + ++def get_kernel_root_path(): ++ parts = sys.argv[0] if not __file__ else __file__ ++ parts = os.path.realpath(parts).split('tools/testing/kunit') ++ if len(parts) != 2: ++ sys.exit(1) ++ return parts[0] ++ + def run_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitRequest) -> KunitResult: + config_start = time.time() +@@ -114,6 +123,9 @@ def main(argv, linux=None): + cli_args = parser.parse_args(argv) + + if cli_args.subcommand == 'run': ++ if get_kernel_root_path(): ++ os.chdir(get_kernel_root_path()) ++ + if cli_args.build_dir: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) +-- +2.20.1 + diff --git a/queue-5.5/mac80211-rx-avoid-rcu-list-traversal-under-mutex.patch b/queue-5.5/mac80211-rx-avoid-rcu-list-traversal-under-mutex.patch new file mode 100644 index 00000000000..979ff3014a6 --- /dev/null +++ b/queue-5.5/mac80211-rx-avoid-rcu-list-traversal-under-mutex.patch @@ -0,0 +1,39 @@ +From 1cc5362d9bee3244701581eaa2e8876a7ef945d7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 23 Feb 2020 20:03:02 +0530 +Subject: mac80211: rx: avoid RCU list traversal under mutex + +From: Madhuparna Bhowmik + +[ Upstream commit 253216ffb2a002a682c6f68bd3adff5b98b71de8 ] + +local->sta_mtx is held in __ieee80211_check_fast_rx_iface(). +No need to use list_for_each_entry_rcu() as it also requires +a cond argument to avoid false lockdep warnings when not used in +RCU read-side section (with CONFIG_PROVE_RCU_LIST). 
+Therefore use list_for_each_entry(); + +Signed-off-by: Madhuparna Bhowmik +Link: https://lore.kernel.org/r/20200223143302.15390-1-madhuparnabhowmik10@gmail.com +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/rx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 0e05ff0376726..0ba98ad9bc854 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4114,7 +4114,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) + + lockdep_assert_held(&local->sta_mtx); + +- list_for_each_entry_rcu(sta, &local->sta_list, list) { ++ list_for_each_entry(sta, &local->sta_list, list) { + if (sdata != sta->sdata && + (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) + continue; +-- +2.20.1 + diff --git a/queue-5.5/mips-disable-vdso-time-functionality-on-micromips.patch b/queue-5.5/mips-disable-vdso-time-functionality-on-micromips.patch new file mode 100644 index 00000000000..294cd2c02f8 --- /dev/null +++ b/queue-5.5/mips-disable-vdso-time-functionality-on-micromips.patch @@ -0,0 +1,64 @@ +From b9e03b8337d2ad0c2f4e3b57e894be67269bd701 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 15 Feb 2020 12:38:36 -0800 +Subject: MIPS: Disable VDSO time functionality on microMIPS + +From: Paul Burton + +[ Upstream commit 07015d7a103c4420b69a287b8ef4d2535c0f4106 ] + +A check we're about to add to pick up on function calls that depend on +bogus use of the GOT in the VDSO picked up on instances of such function +calls in microMIPS builds. Since the code appears genuinely problematic, +and given the relatively small amount of use & testing that microMIPS +sees, go ahead & disable the VDSO for microMIPS builds. + +Signed-off-by: Paul Burton +Signed-off-by: Sasha Levin +--- + arch/mips/vdso/Makefile | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile +index 96afd73c94e8a..e8585a22b925c 100644 +--- a/arch/mips/vdso/Makefile ++++ b/arch/mips/vdso/Makefile +@@ -48,6 +48,8 @@ endif + + CFLAGS_REMOVE_vgettimeofday.o = -pg + ++DISABLE_VDSO := n ++ + # + # For the pre-R6 code in arch/mips/vdso/vdso.h for locating + # the base address of VDSO, the linker will emit a R_MIPS_PC32 +@@ -61,11 +63,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg + ifndef CONFIG_CPU_MIPSR6 + ifeq ($(call ld-ifversion, -lt, 225000000, y),y) + $(warning MIPS VDSO requires binutils >= 2.25) +- obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) +- ccflags-vdso += -DDISABLE_MIPS_VDSO ++ DISABLE_VDSO := y + endif + endif + ++# ++# GCC (at least up to version 9.2) appears to emit function calls that make use ++# of the GOT when targeting microMIPS, which we can't use in the VDSO due to ++# the lack of relocations. As such, we disable the VDSO for microMIPS builds. ++# ++ifdef CONFIG_CPU_MICROMIPS ++ DISABLE_VDSO := y ++endif ++ ++ifeq ($(DISABLE_VDSO),y) ++ obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) ++ ccflags-vdso += -DDISABLE_MIPS_VDSO ++endif ++ + # VDSO linker flags. 
+ VDSO_LDFLAGS := \ + -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ +-- +2.20.1 + diff --git a/queue-5.5/mips-vdso-add-build-time-check-that-no-jalr-t9-calls.patch b/queue-5.5/mips-vdso-add-build-time-check-that-no-jalr-t9-calls.patch new file mode 100644 index 00000000000..95ed4a47346 --- /dev/null +++ b/queue-5.5/mips-vdso-add-build-time-check-that-no-jalr-t9-calls.patch @@ -0,0 +1,67 @@ +From 1fbf79f866d7e31660067ca9f375d5e2d97e9e4d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 Feb 2020 11:24:34 -0800 +Subject: mips: vdso: add build time check that no 'jalr t9' calls left + +From: Victor Kamensky + +[ Upstream commit 976c23af3ee5bd3447a7bfb6c356ceb4acf264a6 ] + +vdso shared object cannot have GOT based PIC 'jalr t9' calls +because nobody set GOT table in vdso. Contributing into vdso +.o files are compiled in PIC mode and as result for internal +static functions calls compiler will generate 'jalr t9' +instructions. Those are supposed to be converted into PC +relative 'bal' calls by linker when relocation are processed. + +Mips global GOT entries do have dynamic relocations and they +will be caught by cmd_vdso_check Makefile rule. Static PIC +calls go through mips local GOT entries that do not have +dynamic relocations. For those 'jalr t9' calls could be present +but without dynamic relocations and they need to be converted +to 'bal' calls by linker. + +Add additional build time check to make sure that no 'jalr t9' +slip through because of some toolchain misconfiguration that +prevents 'jalr t9' to 'bal' conversion. + +Signed-off-by: Victor Kamensky +Signed-off-by: Paul Burton +Cc: linux-mips@vger.kernel.org +Cc: Ralf Baechle +Cc: James Hogan +Cc: Vincenzo Frascino +Cc: bruce.ashfield@gmail.com +Cc: richard.purdie@linuxfoundation.org +Signed-off-by: Sasha Levin +--- + arch/mips/vdso/Makefile | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile +index e8585a22b925c..bfb65b2d57c7f 100644 +--- a/arch/mips/vdso/Makefile ++++ b/arch/mips/vdso/Makefile +@@ -93,12 +93,18 @@ GCOV_PROFILE := n + UBSAN_SANITIZE := n + KCOV_INSTRUMENT := n + ++# Check that we don't have PIC 'jalr t9' calls left ++quiet_cmd_vdso_mips_check = VDSOCHK $@ ++ cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \ ++ then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \ ++ rm -f $@; /bin/false); fi ++ + # + # Shared build commands. 
+ # + + quiet_cmd_vdsold_and_vdso_check = LD $@ +- cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) ++ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check) + + quiet_cmd_vdsold = VDSO $@ + cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ +-- +2.20.1 + diff --git a/queue-5.5/mips-vdso-fix-jalr-t9-crash-in-vdso-code.patch b/queue-5.5/mips-vdso-fix-jalr-t9-crash-in-vdso-code.patch new file mode 100644 index 00000000000..e16e6832c13 --- /dev/null +++ b/queue-5.5/mips-vdso-fix-jalr-t9-crash-in-vdso-code.patch @@ -0,0 +1,64 @@ +From a09c04c400058169c123f72dd09d384d7b23a069 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 Feb 2020 11:24:33 -0800 +Subject: mips: vdso: fix 'jalr t9' crash in vdso code + +From: Victor Kamensky + +[ Upstream commit d3f703c4359ff06619b2322b91f69710453e6b6d ] + +Observed that when kernel is built with Yocto mips64-poky-linux-gcc, +and mips64-poky-linux-gnun32-gcc toolchain, resulting vdso contains +'jalr t9' instructions in its code and since in vdso case nobody +sets GOT table code crashes when instruction reached. On other hand +observed that when kernel is built mips-poky-linux-gcc toolchain, the +same 'jalr t9' instruction are replaced with PC relative function +calls using 'bal' instructions. + +The difference boils down to -mrelax-pic-calls and -mexplicit-relocs +gcc options that gets different default values depending on gcc +target triplets and corresponding binutils. -mrelax-pic-calls got +enabled by default only in mips-poky-linux-gcc case. MIPS binutils +ld relies on R_MIPS_JALR relocation to convert 'jalr t9' into 'bal' +and such relocation is generated only if -mrelax-pic-calls option +is on. + +Please note 'jalr t9' conversion to 'bal' can happen only to static +functions. These static PIC calls use mips local GOT entries that +are supposed to be filled with start of DSO value by run-time linker +(missing in VDSO case) and they do not have dynamic relocations. +Global mips GOT entries must have dynamic relocations and they should +be prevented by cmd_vdso_check Makefile rule. + +Solution call out -mrelax-pic-calls and -mexplicit-relocs options +explicitly while compiling MIPS vdso code. That would get correct +and consistent between different toolchains behaviour. 
+ +Reported-by: Bruce Ashfield +Signed-off-by: Victor Kamensky +Signed-off-by: Paul Burton +Cc: linux-mips@vger.kernel.org +Cc: Ralf Baechle +Cc: James Hogan +Cc: Vincenzo Frascino +Cc: richard.purdie@linuxfoundation.org +Signed-off-by: Sasha Levin +--- + arch/mips/vdso/Makefile | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile +index e05938997e696..96afd73c94e8a 100644 +--- a/arch/mips/vdso/Makefile ++++ b/arch/mips/vdso/Makefile +@@ -29,6 +29,7 @@ endif + cflags-vdso := $(ccflags-vdso) \ + $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ + -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ ++ -mrelax-pic-calls -mexplicit-relocs \ + -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ + $(call cc-option, -fno-asynchronous-unwind-tables) \ + $(call cc-option, -fno-stack-protector) +-- +2.20.1 + diff --git a/queue-5.5/mips-vdso-wrap-mexplicit-relocs-in-cc-option.patch b/queue-5.5/mips-vdso-wrap-mexplicit-relocs-in-cc-option.patch new file mode 100644 index 00000000000..2df4c2118fb --- /dev/null +++ b/queue-5.5/mips-vdso-wrap-mexplicit-relocs-in-cc-option.patch @@ -0,0 +1,57 @@ +From 8259d900bbaa98a35737a0063e057dab173b1acc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 17 Feb 2020 14:11:49 -0700 +Subject: MIPS: vdso: Wrap -mexplicit-relocs in cc-option + +From: Nathan Chancellor + +[ Upstream commit 72cf3b3df423c1bbd8fa1056fed009d3a260f8a9 ] + +Clang does not support this option and errors out: + +clang-11: error: unknown argument: '-mexplicit-relocs' + +Clang does not appear to need this flag like GCC does because the jalr +check that was added in commit 976c23af3ee5 ("mips: vdso: add build +time check that no 'jalr t9' calls left") passes just fine with + +$ make ARCH=mips CC=clang CROSS_COMPILE=mipsel-linux-gnu- malta_defconfig arch/mips/vdso/ + +even before commit d3f703c4359f ("mips: vdso: fix 'jalr t9' crash in +vdso code"). + +-mrelax-pic-calls has been supported since clang 9, which is the +earliest version that could build a working MIPS kernel, and it is the +default for clang so just leave it be. 
+ +Fixes: d3f703c4359f ("mips: vdso: fix 'jalr t9' crash in vdso code") +Link: https://github.com/ClangBuiltLinux/linux/issues/890 +Signed-off-by: Nathan Chancellor +Reviewed-by: Nick Desaulniers +Tested-by: Nick Desaulniers +Signed-off-by: Paul Burton +Cc: Ralf Baechle +Cc: linux-mips@vger.kernel.org +Cc: linux-kernel@vger.kernel.org +Cc: clang-built-linux@googlegroups.com +Signed-off-by: Sasha Levin +--- + arch/mips/vdso/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile +index bfb65b2d57c7f..2cf4b6131d88d 100644 +--- a/arch/mips/vdso/Makefile ++++ b/arch/mips/vdso/Makefile +@@ -29,7 +29,7 @@ endif + cflags-vdso := $(ccflags-vdso) \ + $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ + -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ +- -mrelax-pic-calls -mexplicit-relocs \ ++ -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \ + -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ + $(call cc-option, -fno-asynchronous-unwind-tables) \ + $(call cc-option, -fno-stack-protector) +-- +2.20.1 + diff --git a/queue-5.5/mlxsw-pci-wait-longer-before-accessing-the-device-af.patch b/queue-5.5/mlxsw-pci-wait-longer-before-accessing-the-device-af.patch new file mode 100644 index 00000000000..ab8e96288b2 --- /dev/null +++ b/queue-5.5/mlxsw-pci-wait-longer-before-accessing-the-device-af.patch @@ -0,0 +1,45 @@ +From 3d355cf38fc39e83836dae8801ccc74ee5ad6387 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 21:07:53 +0100 +Subject: mlxsw: pci: Wait longer before accessing the device after reset + +From: Amit Cohen + +[ Upstream commit ac004e84164e27d69017731a97b11402a69d854b ] + +During initialization the driver issues a reset to the device and waits +for 100ms before checking if the firmware is ready. The waiting is +necessary because before that the device is irresponsive and the first +read can result in a completion timeout. + +While 100ms is sufficient for Spectrum-1 and Spectrum-2, it is +insufficient for Spectrum-3. + +Fix this by increasing the timeout to 200ms. + +Fixes: da382875c616 ("mlxsw: spectrum: Extend to support Spectrum-3 ASIC") +Signed-off-by: Amit Cohen +Signed-off-by: Ido Schimmel +Signed-off-by: Jiri Pirko +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +index e0d7d2d9a0c81..43fa8c85b5d9f 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +@@ -28,7 +28,7 @@ + #define MLXSW_PCI_SW_RESET 0xF0010 + #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) + #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000 +-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 ++#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200 + #define MLXSW_PCI_FW_READY 0xA1844 + #define MLXSW_PCI_FW_READY_MASK 0xFFFF + #define MLXSW_PCI_FW_READY_MAGIC 0x5E +-- +2.20.1 + diff --git a/queue-5.5/net-bcmgenet-clear-id_mode_dis-in-ext_rgmii_oob_ctrl.patch b/queue-5.5/net-bcmgenet-clear-id_mode_dis-in-ext_rgmii_oob_ctrl.patch new file mode 100644 index 00000000000..cd3bc3a8251 --- /dev/null +++ b/queue-5.5/net-bcmgenet-clear-id_mode_dis-in-ext_rgmii_oob_ctrl.patch @@ -0,0 +1,39 @@ +From 690913a4f70d6caa182b3196418b239866f9e2f3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 25 Feb 2020 14:11:59 +0100 +Subject: net: bcmgenet: Clear ID_MODE_DIS in EXT_RGMII_OOB_CTRL when not + needed + +From: Nicolas Saenz Julienne + +[ Upstream commit 402482a6a78e5c61d8a2ec6311fc5b4aca392cd6 ] + +Outdated Raspberry Pi 4 firmware might configure the external PHY as +rgmii although the kernel currently sets it as rgmii-rxid. This makes +connections unreliable as ID_MODE_DIS is left enabled. To avoid this, +explicitly clear that bit whenever we don't need it. + +Fixes: da38802211cc ("net: bcmgenet: Add RGMII_RXID support") +Signed-off-by: Nicolas Saenz Julienne +Acked-by: Florian Fainelli +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/genet/bcmmii.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c +index 6392a25301838..10244941a7a60 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c +@@ -294,6 +294,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); ++ reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; +-- +2.20.1 + diff --git a/queue-5.5/net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch b/queue-5.5/net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch new file mode 100644 index 00000000000..ce9c7808215 --- /dev/null +++ b/queue-5.5/net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch @@ -0,0 +1,44 @@ +From ead68c54b14cfc42bba37c6f7b7d9cce19ba7953 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 21:20:49 +0100 +Subject: net: dsa: mv88e6xxx: Fix masking of egress port + +From: Andrew Lunn + +[ Upstream commit 3ee339eb28959629db33aaa2b8cde4c63c6289eb ] + +Add missing ~ to the usage of the mask. + +Reported-by: Kevin Benson +Reported-by: Chris Healy +Fixes: 5c74c54ce6ff ("net: dsa: mv88e6xxx: Split monitor port configuration") +Signed-off-by: Andrew Lunn +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/mv88e6xxx/global1.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c +index b016cc205f81f..ca3a7a7a73c32 100644 +--- a/drivers/net/dsa/mv88e6xxx/global1.c ++++ b/drivers/net/dsa/mv88e6xxx/global1.c +@@ -278,13 +278,13 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + dest_port_chip = &chip->ingress_dest_port; +- reg &= MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; ++ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; + reg |= port << + __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK); + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + dest_port_chip = &chip->egress_dest_port; +- reg &= MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; ++ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; + reg |= port << + __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); + break; +-- +2.20.1 + diff --git a/queue-5.5/net-ks8851-ml-fix-irq-handling-and-locking.patch b/queue-5.5/net-ks8851-ml-fix-irq-handling-and-locking.patch new file mode 100644 index 00000000000..2499c8f89f8 --- /dev/null +++ b/queue-5.5/net-ks8851-ml-fix-irq-handling-and-locking.patch @@ -0,0 +1,102 @@ +From ea61b3ccdc0e2d7fc859a7b431e96f4291da1e4c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 23 Feb 2020 14:38:40 +0100 +Subject: net: ks8851-ml: Fix IRQ handling and locking + +From: Marek Vasut + +[ Upstream commit 44343418d0f2f623cb9da6f5000df793131cbe3b ] + +The KS8851 requires that packet RX and TX are mutually exclusive. +Currently, the driver hopes to achieve this by disabling interrupt +from the card by writing the card registers and by disabling the +interrupt on the interrupt controller. This however is racy on SMP. + +Replace this approach by expanding the spinlock used around the +ks_start_xmit() TX path to ks_irq() RX path to assure true mutual +exclusion and remove the interrupt enabling/disabling, which is +now not needed anymore. Furthermore, disable interrupts also in +ks_net_stop(), which was missing before. + +Note that a massive improvement here would be to re-use the KS8851 +driver approach, which is to move the TX path into a worker thread, +interrupt handling to threaded interrupt, and synchronize everything +with mutexes, but that would be a much bigger rework, for a separate +patch. + +Signed-off-by: Marek Vasut +Cc: David S. Miller +Cc: Lukas Wunner +Cc: Petr Stetiar +Cc: YueHaibing +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/micrel/ks8851_mll.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c +index 1c9e70c8cc30f..58579baf3f7a0 100644 +--- a/drivers/net/ethernet/micrel/ks8851_mll.c ++++ b/drivers/net/ethernet/micrel/ks8851_mll.c +@@ -513,14 +513,17 @@ static irqreturn_t ks_irq(int irq, void *pw) + { + struct net_device *netdev = pw; + struct ks_net *ks = netdev_priv(netdev); ++ unsigned long flags; + u16 status; + ++ spin_lock_irqsave(&ks->statelock, flags); + /*this should be the first in IRQ handler */ + ks_save_cmd_reg(ks); + + status = ks_rdreg16(ks, KS_ISR); + if (unlikely(!status)) { + ks_restore_cmd_reg(ks); ++ spin_unlock_irqrestore(&ks->statelock, flags); + return IRQ_NONE; + } + +@@ -546,6 +549,7 @@ static irqreturn_t ks_irq(int irq, void *pw) + ks->netdev->stats.rx_over_errors++; + /* this should be the last in IRQ handler*/ + ks_restore_cmd_reg(ks); ++ spin_unlock_irqrestore(&ks->statelock, flags); + return IRQ_HANDLED; + } + +@@ -615,6 +619,7 @@ static int ks_net_stop(struct net_device *netdev) + + /* shutdown RX/TX QMU */ + ks_disable_qmu(ks); ++ ks_disable_int(ks); + + /* set powermode to soft power down to save power */ + ks_set_powermode(ks, PMECR_PM_SOFTDOWN); +@@ -671,10 +676,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) + { + netdev_tx_t retv = NETDEV_TX_OK; + struct ks_net *ks = netdev_priv(netdev); ++ unsigned long flags; + +- disable_irq(netdev->irq); +- ks_disable_int(ks); +- spin_lock(&ks->statelock); ++ spin_lock_irqsave(&ks->statelock, flags); + + /* Extra space are required: + * 4 byte for alignment, 4 for status/length, 4 for CRC +@@ -688,9 +692,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) + dev_kfree_skb(skb); + } else + retv = NETDEV_TX_BUSY; +- spin_unlock(&ks->statelock); +- ks_enable_int(ks); +- enable_irq(netdev->irq); ++ spin_unlock_irqrestore(&ks->statelock, flags); + return retv; + } + +-- +2.20.1 + diff --git a/queue-5.5/net-ll_temac-add-more-error-handling-of-dma_map_sing.patch b/queue-5.5/net-ll_temac-add-more-error-handling-of-dma_map_sing.patch new file mode 100644 index 00000000000..aca9766adb3 --- /dev/null +++ b/queue-5.5/net-ll_temac-add-more-error-handling-of-dma_map_sing.patch @@ -0,0 +1,78 @@ +From 220e89976bd275a4dea6e32f0e1a8140413f5ad7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 07:47:33 +0100 +Subject: net: ll_temac: Add more error handling of dma_map_single() calls + +From: Esben Haabendal + +[ Upstream commit d07c849cd2b97d6809430dfb7e738ad31088037a ] + +This adds error handling to the remaining dma_map_single() calls, so that +behavior is well defined if/when we run out of DMA memory. + +Fixes: 92744989533c ("net: add Xilinx ll_temac device driver") +Signed-off-by: Esben Haabendal +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/xilinx/ll_temac_main.c | 26 +++++++++++++++++++-- + 1 file changed, 24 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index fd578568b3bff..fd4231493449b 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -367,6 +367,8 @@ static int temac_dma_bd_init(struct net_device *ndev) + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); ++ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) ++ goto out; + lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); + lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); + lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); +@@ -863,12 +865,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + cur_p->len = cpu_to_be32(skb_headlen(skb)); ++ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) ++ return NETDEV_TX_BUSY; + cur_p->phys = cpu_to_be32(skb_dma_addr); + ptr_to_txbd((void *)skb, cur_p); + + for (ii = 0; ii < num_frag; ii++) { +- lp->tx_bd_tail++; +- if (lp->tx_bd_tail >= TX_BD_NUM) ++ if (++lp->tx_bd_tail >= TX_BD_NUM) + lp->tx_bd_tail = 0; + + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; +@@ -876,6 +879,25 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + skb_frag_address(frag), + skb_frag_size(frag), + DMA_TO_DEVICE); ++ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { ++ if (--lp->tx_bd_tail < 0) ++ lp->tx_bd_tail = TX_BD_NUM - 1; ++ cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; ++ while (--ii >= 0) { ++ --frag; ++ dma_unmap_single(ndev->dev.parent, ++ be32_to_cpu(cur_p->phys), ++ skb_frag_size(frag), ++ DMA_TO_DEVICE); ++ if (--lp->tx_bd_tail < 0) ++ lp->tx_bd_tail = TX_BD_NUM - 1; ++ cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; ++ } ++ dma_unmap_single(ndev->dev.parent, ++ be32_to_cpu(cur_p->phys), ++ skb_headlen(skb), DMA_TO_DEVICE); ++ return NETDEV_TX_BUSY; ++ } + cur_p->phys = cpu_to_be32(skb_dma_addr); + cur_p->len = cpu_to_be32(skb_frag_size(frag)); + cur_p->app0 = 0; +-- +2.20.1 + diff --git a/queue-5.5/net-ll_temac-fix-race-condition-causing-tx-hang.patch b/queue-5.5/net-ll_temac-fix-race-condition-causing-tx-hang.patch new file mode 100644 index 00000000000..9e492a55a91 --- /dev/null +++ b/queue-5.5/net-ll_temac-fix-race-condition-causing-tx-hang.patch @@ -0,0 +1,69 @@ +From cb2cd369e6022e3acd79681753f49950d8bbe742 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 07:47:21 +0100 +Subject: net: ll_temac: Fix race condition causing TX hang + +From: Esben Haabendal + +[ Upstream commit 84823ff80f7403752b59e00bb198724100dc611c ] + +It is possible that the interrupt handler fires and frees up space in +the TX ring in between checking for sufficient TX ring space and +stopping the TX queue in temac_start_xmit. If this happens, the +queue wake from the interrupt handler will occur before the queue is +stopped, causing a lost wakeup and the adapter's transmit hanging. + +To avoid this, after stopping the queue, check again whether there is +sufficient space in the TX ring. If so, wake up the queue again. + +This is a port of the similar fix in axienet driver, +commit 7de44285c1f6 ("net: axienet: Fix race condition causing TX hang"). 
+ +Fixes: 23ecc4bde21f ("net: ll_temac: fix checksum offload logic") +Signed-off-by: Esben Haabendal +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/xilinx/ll_temac_main.c | 19 ++++++++++++++++--- + 1 file changed, 16 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index 21c1b4322ea78..fd578568b3bff 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -788,6 +788,9 @@ static void temac_start_xmit_done(struct net_device *ndev) + stat = be32_to_cpu(cur_p->app0); + } + ++ /* Matches barrier in temac_start_xmit */ ++ smp_mb(); ++ + netif_wake_queue(ndev); + } + +@@ -830,9 +833,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + + if (temac_check_tx_bd_space(lp, num_frag + 1)) { +- if (!netif_queue_stopped(ndev)) +- netif_stop_queue(ndev); +- return NETDEV_TX_BUSY; ++ if (netif_queue_stopped(ndev)) ++ return NETDEV_TX_BUSY; ++ ++ netif_stop_queue(ndev); ++ ++ /* Matches barrier in temac_start_xmit_done */ ++ smp_mb(); ++ ++ /* Space might have just been freed - check again */ ++ if (temac_check_tx_bd_space(lp, num_frag)) ++ return NETDEV_TX_BUSY; ++ ++ netif_wake_queue(ndev); + } + + cur_p->app0 = 0; +-- +2.20.1 + diff --git a/queue-5.5/net-ll_temac-fix-rx-buffer-descriptor-handling-on-gf.patch b/queue-5.5/net-ll_temac-fix-rx-buffer-descriptor-handling-on-gf.patch new file mode 100644 index 00000000000..8438d12a619 --- /dev/null +++ b/queue-5.5/net-ll_temac-fix-rx-buffer-descriptor-handling-on-gf.patch @@ -0,0 +1,242 @@ +From 45a3d2f35e76cf154b3b71d72e222fc2c2fe75dd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 07:47:45 +0100 +Subject: net: ll_temac: Fix RX buffer descriptor handling on GFP_ATOMIC + pressure + +From: Esben Haabendal + +[ Upstream commit 770d9c67974c4c71af4beb786dc43162ad2a15ba ] + +Failures caused by GFP_ATOMIC memory pressure have been observed, and +due to the missing error handling, results in kernel crash such as + +[1876998.350133] kernel BUG at mm/slub.c:3952! +[1876998.350141] invalid opcode: 0000 [#1] PREEMPT SMP PTI +[1876998.350147] CPU: 2 PID: 0 Comm: swapper/2 Not tainted 5.3.0-scnxt #1 +[1876998.350150] Hardware name: N/A N/A/COMe-bIP2, BIOS CCR2R920 03/01/2017 +[1876998.350160] RIP: 0010:kfree+0x1ca/0x220 +[1876998.350164] Code: 85 db 74 49 48 8b 95 68 01 00 00 48 31 c2 48 89 10 e9 d7 fe ff ff 49 8b 04 24 a9 00 00 01 00 75 0b 49 8b 44 24 08 a8 01 75 02 <0f> 0b 49 8b 04 24 31 f6 a9 00 00 01 00 74 06 41 0f b6 74 24 + 5b +[1876998.350172] RSP: 0018:ffffc900000f0df0 EFLAGS: 00010246 +[1876998.350177] RAX: ffffea00027f0708 RBX: ffff888008d78000 RCX: 0000000000391372 +[1876998.350181] RDX: 0000000000000000 RSI: ffffe8ffffd01400 RDI: ffff888008d78000 +[1876998.350185] RBP: ffff8881185a5d00 R08: ffffc90000087dd8 R09: 000000000000280a +[1876998.350189] R10: 0000000000000002 R11: 0000000000000000 R12: ffffea0000235e00 +[1876998.350193] R13: ffff8881185438a0 R14: 0000000000000000 R15: ffff888118543870 +[1876998.350198] FS: 0000000000000000(0000) GS:ffff88811f300000(0000) knlGS:0000000000000000 +[1876998.350203] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +s#1 Part1 +[1876998.350206] CR2: 00007f8dac7b09f0 CR3: 000000011e20a006 CR4: 00000000001606e0 +[1876998.350210] Call Trace: +[1876998.350215] +[1876998.350224] ? 
__netif_receive_skb_core+0x70a/0x920 +[1876998.350229] kfree_skb+0x32/0xb0 +[1876998.350234] __netif_receive_skb_core+0x70a/0x920 +[1876998.350240] __netif_receive_skb_one_core+0x36/0x80 +[1876998.350245] process_backlog+0x8b/0x150 +[1876998.350250] net_rx_action+0xf7/0x340 +[1876998.350255] __do_softirq+0x10f/0x353 +[1876998.350262] irq_exit+0xb2/0xc0 +[1876998.350265] do_IRQ+0x77/0xd0 +[1876998.350271] common_interrupt+0xf/0xf +[1876998.350274] + +In order to handle such failures more graceful, this change splits the +receive loop into one for consuming the received buffers, and one for +allocating new buffers. + +When GFP_ATOMIC allocations fail, the receive will continue with the +buffers that is still there, and with the expectation that the allocations +will succeed in a later call to receive. + +Fixes: 92744989533c ("net: add Xilinx ll_temac device driver") +Signed-off-by: Esben Haabendal +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/xilinx/ll_temac.h | 1 + + drivers/net/ethernet/xilinx/ll_temac_main.c | 112 ++++++++++++++------ + 2 files changed, 82 insertions(+), 31 deletions(-) + +diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h +index 276292bca334d..99fe059e5c7f3 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac.h ++++ b/drivers/net/ethernet/xilinx/ll_temac.h +@@ -375,6 +375,7 @@ struct temac_local { + int tx_bd_next; + int tx_bd_tail; + int rx_bd_ci; ++ int rx_bd_tail; + + /* DMA channel control setup */ + u32 tx_chnl_ctrl; +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index fd4231493449b..2e3f59dae586e 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -389,12 +389,13 @@ static int temac_dma_bd_init(struct net_device *ndev) + lp->tx_bd_next = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; ++ lp->rx_bd_tail = RX_BD_NUM - 1; + + /* Enable RX DMA transfers */ + wmb(); + lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); + lp->dma_out(lp, RX_TAILDESC_PTR, +- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); ++ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail)); + + /* Prepare for TX DMA transfer */ + lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); +@@ -923,27 +924,41 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + static void ll_temac_recv(struct net_device *ndev) + { + struct temac_local *lp = netdev_priv(ndev); +- struct sk_buff *skb, *new_skb; +- unsigned int bdstat; +- struct cdmac_bd *cur_p; +- dma_addr_t tail_p, skb_dma_addr; +- int length; + unsigned long flags; ++ int rx_bd; ++ bool update_tail = false; + + spin_lock_irqsave(&lp->rx_lock, flags); + +- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; +- cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; +- +- bdstat = be32_to_cpu(cur_p->app0); +- while ((bdstat & STS_CTRL_APP0_CMPLT)) { ++ /* Process all received buffers, passing them on network ++ * stack. After this, the buffer descriptors will be in an ++ * un-allocated stage, where no skb is allocated for it, and ++ * they are therefore not available for TEMAC/DMA. ++ */ ++ do { ++ struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; ++ struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; ++ unsigned int bdstat = be32_to_cpu(bd->app0); ++ int length; ++ ++ /* While this should not normally happen, we can end ++ * here when GFP_ATOMIC allocations fail, and we ++ * therefore have un-allocated buffers. 
++ */ ++ if (!skb) ++ break; + +- skb = lp->rx_skb[lp->rx_bd_ci]; +- length = be32_to_cpu(cur_p->app4) & 0x3FFF; ++ /* Loop over all completed buffer descriptors */ ++ if (!(bdstat & STS_CTRL_APP0_CMPLT)) ++ break; + +- dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), ++ dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys), + XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); ++ /* The buffer is not valid for DMA anymore */ ++ bd->phys = 0; ++ bd->len = 0; + ++ length = be32_to_cpu(bd->app4) & 0x3FFF; + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); +@@ -958,39 +973,74 @@ static void ll_temac_recv(struct net_device *ndev) + * (back) for proper IP checksum byte order + * (be16). + */ +- skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF); ++ skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); ++ /* The skb buffer is now owned by network stack above */ ++ lp->rx_skb[lp->rx_bd_ci] = NULL; + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; + +- new_skb = netdev_alloc_skb_ip_align(ndev, +- XTE_MAX_JUMBO_FRAME_SIZE); +- if (!new_skb) { +- spin_unlock_irqrestore(&lp->rx_lock, flags); +- return; ++ rx_bd = lp->rx_bd_ci; ++ if (++lp->rx_bd_ci >= RX_BD_NUM) ++ lp->rx_bd_ci = 0; ++ } while (rx_bd != lp->rx_bd_tail); ++ ++ /* Allocate new buffers for those buffer descriptors that were ++ * passed to network stack. Note that GFP_ATOMIC allocations ++ * can fail (e.g. when a larger burst of GFP_ATOMIC ++ * allocations occurs), so while we try to allocate all ++ * buffers in the same interrupt where they were processed, we ++ * continue with what we could get in case of allocation ++ * failure. Allocation of remaining buffers will be retried ++ * in following calls. 
++ */ ++ while (1) { ++ struct sk_buff *skb; ++ struct cdmac_bd *bd; ++ dma_addr_t skb_dma_addr; ++ ++ rx_bd = lp->rx_bd_tail + 1; ++ if (rx_bd >= RX_BD_NUM) ++ rx_bd = 0; ++ bd = &lp->rx_bd_v[rx_bd]; ++ ++ if (bd->phys) ++ break; /* All skb's allocated */ ++ ++ skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); ++ if (!skb) { ++ dev_warn(&ndev->dev, "skb alloc failed\n"); ++ break; + } + +- cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); +- skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data, ++ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); +- cur_p->phys = cpu_to_be32(skb_dma_addr); +- cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); +- lp->rx_skb[lp->rx_bd_ci] = new_skb; ++ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, ++ skb_dma_addr))) { ++ dev_kfree_skb_any(skb); ++ break; ++ } + +- lp->rx_bd_ci++; +- if (lp->rx_bd_ci >= RX_BD_NUM) +- lp->rx_bd_ci = 0; ++ bd->phys = cpu_to_be32(skb_dma_addr); ++ bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); ++ bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); ++ lp->rx_skb[rx_bd] = skb; ++ ++ lp->rx_bd_tail = rx_bd; ++ update_tail = true; ++ } + +- cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; +- bdstat = be32_to_cpu(cur_p->app0); ++ /* Move tail pointer when buffers have been allocated */ ++ if (update_tail) { ++ lp->dma_out(lp, RX_TAILDESC_PTR, ++ lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail); + } +- lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); + + spin_unlock_irqrestore(&lp->rx_lock, flags); + } +-- +2.20.1 + diff --git a/queue-5.5/net-ll_temac-handle-dma-halt-condition-caused-by-buf.patch b/queue-5.5/net-ll_temac-handle-dma-halt-condition-caused-by-buf.patch new file mode 100644 index 00000000000..dd31e964443 --- /dev/null +++ b/queue-5.5/net-ll_temac-handle-dma-halt-condition-caused-by-buf.patch @@ -0,0 +1,176 @@ +From 9a2f0d5c3e38a378b3d1190c431876c7273056ac Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 07:47:58 +0100 +Subject: net: ll_temac: Handle DMA halt condition caused by buffer underrun + +From: Esben Haabendal + +[ Upstream commit 1d63b8d66d146deaaedbe16c80de105f685ea012 ] + +The SDMA engine used by TEMAC halts operation when it has finished +processing of the last buffer descriptor in the buffer ring. +Unfortunately, no interrupt event is generated when this happens, +so we need to setup another mechanism to make sure DMA operation is +restarted when enough buffers have been added to the ring. + +Fixes: 92744989533c ("net: add Xilinx ll_temac device driver") +Signed-off-by: Esben Haabendal +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/xilinx/ll_temac.h | 3 ++ + drivers/net/ethernet/xilinx/ll_temac_main.c | 58 +++++++++++++++++++-- + 2 files changed, 56 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h +index 99fe059e5c7f3..53fb8141f1a67 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac.h ++++ b/drivers/net/ethernet/xilinx/ll_temac.h +@@ -380,6 +380,9 @@ struct temac_local { + /* DMA channel control setup */ + u32 tx_chnl_ctrl; + u32 rx_chnl_ctrl; ++ u8 coalesce_count_rx; ++ ++ struct delayed_work restart_work; + }; + + /* Wrappers for temac_ior()/temac_iow() function pointers above */ +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index 2e3f59dae586e..eb480204cdbeb 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -866,8 +867,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + cur_p->len = cpu_to_be32(skb_headlen(skb)); +- if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) +- return NETDEV_TX_BUSY; ++ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { ++ dev_kfree_skb_any(skb); ++ ndev->stats.tx_dropped++; ++ return NETDEV_TX_OK; ++ } + cur_p->phys = cpu_to_be32(skb_dma_addr); + ptr_to_txbd((void *)skb, cur_p); + +@@ -897,7 +901,9 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + dma_unmap_single(ndev->dev.parent, + be32_to_cpu(cur_p->phys), + skb_headlen(skb), DMA_TO_DEVICE); +- return NETDEV_TX_BUSY; ++ dev_kfree_skb_any(skb); ++ ndev->stats.tx_dropped++; ++ return NETDEV_TX_OK; + } + cur_p->phys = cpu_to_be32(skb_dma_addr); + cur_p->len = cpu_to_be32(skb_frag_size(frag)); +@@ -920,6 +926,17 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + return NETDEV_TX_OK; + } + ++static int ll_temac_recv_buffers_available(struct temac_local *lp) ++{ ++ int available; ++ ++ if (!lp->rx_skb[lp->rx_bd_ci]) ++ return 0; ++ available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; ++ if (available <= 0) ++ available += RX_BD_NUM; ++ return available; ++} + + static void ll_temac_recv(struct net_device *ndev) + { +@@ -990,6 +1007,18 @@ static void ll_temac_recv(struct net_device *ndev) + lp->rx_bd_ci = 0; + } while (rx_bd != lp->rx_bd_tail); + ++ /* DMA operations will halt when the last buffer descriptor is ++ * processed (ie. the one pointed to by RX_TAILDESC_PTR). ++ * When that happens, no more interrupt events will be ++ * generated. No IRQ_COAL or IRQ_DLY, and not even an ++ * IRQ_ERR. To avoid stalling, we schedule a delayed work ++ * when there is a potential risk of that happening. The work ++ * will call this function, and thus re-schedule itself until ++ * enough buffers are available again. ++ */ ++ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx) ++ schedule_delayed_work(&lp->restart_work, HZ / 1000); ++ + /* Allocate new buffers for those buffer descriptors that were + * passed to network stack. Note that GFP_ATOMIC allocations + * can fail (e.g. 
when a larger burst of GFP_ATOMIC +@@ -1045,6 +1074,18 @@ static void ll_temac_recv(struct net_device *ndev) + spin_unlock_irqrestore(&lp->rx_lock, flags); + } + ++/* Function scheduled to ensure a restart in case of DMA halt ++ * condition caused by running out of buffer descriptors. ++ */ ++static void ll_temac_restart_work_func(struct work_struct *work) ++{ ++ struct temac_local *lp = container_of(work, struct temac_local, ++ restart_work.work); ++ struct net_device *ndev = lp->ndev; ++ ++ ll_temac_recv(ndev); ++} ++ + static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) + { + struct net_device *ndev = _ndev; +@@ -1137,6 +1178,8 @@ static int temac_stop(struct net_device *ndev) + + dev_dbg(&ndev->dev, "temac_close()\n"); + ++ cancel_delayed_work_sync(&lp->restart_work); ++ + free_irq(lp->tx_irq, ndev); + free_irq(lp->rx_irq, ndev); + +@@ -1269,6 +1312,7 @@ static int temac_probe(struct platform_device *pdev) + lp->dev = &pdev->dev; + lp->options = XTE_OPTION_DEFAULTS; + spin_lock_init(&lp->rx_lock); ++ INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); + + /* Setup mutex for synchronization of indirect register access */ + if (pdata) { +@@ -1375,6 +1419,7 @@ static int temac_probe(struct platform_device *pdev) + */ + lp->tx_chnl_ctrl = 0x10220000; + lp->rx_chnl_ctrl = 0xff070000; ++ lp->coalesce_count_rx = 0x07; + + /* Finished with the DMA node; drop the reference */ + of_node_put(dma_np); +@@ -1406,11 +1451,14 @@ static int temac_probe(struct platform_device *pdev) + (pdata->tx_irq_count << 16); + else + lp->tx_chnl_ctrl = 0x10220000; +- if (pdata->rx_irq_timeout || pdata->rx_irq_count) ++ if (pdata->rx_irq_timeout || pdata->rx_irq_count) { + lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) | + (pdata->rx_irq_count << 16); +- else ++ lp->coalesce_count_rx = pdata->rx_irq_count; ++ } else { + lp->rx_chnl_ctrl = 0xff070000; ++ lp->coalesce_count_rx = 0x07; ++ } + } + + /* Error handle returned DMA RX and TX interrupts */ +-- +2.20.1 + diff --git a/queue-5.5/net-phy-mscc-fix-firmware-paths.patch b/queue-5.5/net-phy-mscc-fix-firmware-paths.patch new file mode 100644 index 00000000000..9c8f549ebbd --- /dev/null +++ b/queue-5.5/net-phy-mscc-fix-firmware-paths.patch @@ -0,0 +1,43 @@ +From deb1378dc308b0b6668f18816f0395181b40539f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Feb 2020 16:26:50 +0100 +Subject: net: phy: mscc: fix firmware paths + +From: Antoine Tenart + +[ Upstream commit c87a9d6fc6d555e4981f2ded77d9a8cce950743e ] + +The firmware paths for the VSC8584 PHYs not not contain the leading +'microchip/' directory, as used in linux-firmware, resulting in an +error when probing the driver. This patch fixes it. + +Fixes: a5afc1678044 ("net: phy: mscc: add support for VSC8584 PHY") +Signed-off-by: Antoine Tenart +Reviewed-by: Andrew Lunn +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/phy/mscc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c +index d5f8f351d9eff..3e38d15a67c64 100644 +--- a/drivers/net/phy/mscc.c ++++ b/drivers/net/phy/mscc.c +@@ -310,11 +310,11 @@ enum rgmii_rx_clock_delay { + BIT(VSC8531_FORCE_LED_OFF) | \ + BIT(VSC8531_FORCE_LED_ON)) + +-#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin" ++#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin" + #define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800 + #define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48 + +-#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin" ++#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin" + #define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000 + #define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8 + +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-do-not-allow-to-change-mux-id-if-mux-id-is.patch b/queue-5.5/net-rmnet-do-not-allow-to-change-mux-id-if-mux-id-is.patch new file mode 100644 index 00000000000..7b8014a9470 --- /dev/null +++ b/queue-5.5/net-rmnet-do-not-allow-to-change-mux-id-if-mux-id-is.patch @@ -0,0 +1,47 @@ +From 863e618eb3da14bd2baf46a428e6f1e082665415 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:25:19 +0000 +Subject: net: rmnet: do not allow to change mux id if mux id is duplicated + +From: Taehee Yoo + +[ Upstream commit 1dc49e9d164cd7e11c81279c83db84a147e14740 ] + +Basically, duplicate mux id isn't be allowed. +So, the creation of rmnet will be failed if there is duplicate mux id +is existing. +But, changelink routine doesn't check duplicate mux id. + +Test commands: + modprobe rmnet + ip link add dummy0 type dummy + ip link add rmnet0 link dummy0 type rmnet mux_id 1 + ip link add rmnet1 link dummy0 type rmnet mux_id 2 + ip link set rmnet1 type rmnet mux_id 1 + +Fixes: 23790ef12082 ("net: qualcomm: rmnet: Allow to configure flags for existing devices") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index 0ad64aa665925..3c0e6d24d0834 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -306,6 +306,10 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); ++ if (rmnet_get_endpoint(port, mux_id)) { ++ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); ++ return -EINVAL; ++ } + ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-fix-bridge-mode-bugs.patch b/queue-5.5/net-rmnet-fix-bridge-mode-bugs.patch new file mode 100644 index 00000000000..c16e4eb20fb --- /dev/null +++ b/queue-5.5/net-rmnet-fix-bridge-mode-bugs.patch @@ -0,0 +1,360 @@ +From 9ed94a5eb09836eae7a46be340e10c2d736828cb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:26:02 +0000 +Subject: net: rmnet: fix bridge mode bugs + +From: Taehee Yoo + +[ Upstream commit d939b6d30bea1a2322bc536b12be0a7c4c2bccd7 ] + +In order to attach a bridge interface to the rmnet interface, +"master" operation is used. +(e.g. 
ip link set dummy1 master rmnet0) +But, in the rmnet_add_bridge(), which is a callback of ->ndo_add_slave() +doesn't register lower interface. +So, ->ndo_del_slave() doesn't work. +There are other problems too. +1. It couldn't detect circular upper/lower interface relationship. +2. It couldn't prevent stack overflow because of too deep depth +of upper/lower interface +3. It doesn't check the number of lower interfaces. +4. Panics because of several reasons. + +The root problem of these issues is actually the same. +So, in this patch, these all problems will be fixed. + +Test commands: + modprobe rmnet + ip link add dummy0 type dummy + ip link add rmnet0 link dummy0 type rmnet mux_id 1 + ip link add dummy1 master rmnet0 type dummy + ip link add dummy2 master rmnet0 type dummy + ip link del rmnet0 + ip link del dummy2 + ip link del dummy1 + +Splat looks like: +[ 41.867595][ T1164] general protection fault, probably for non-canonical address 0xdffffc0000000101I +[ 41.869993][ T1164] KASAN: null-ptr-deref in range [0x0000000000000808-0x000000000000080f] +[ 41.872950][ T1164] CPU: 0 PID: 1164 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 41.873915][ T1164] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 41.875161][ T1164] RIP: 0010:rmnet_unregister_bridge.isra.6+0x71/0xf0 [rmnet] +[ 41.876178][ T1164] Code: 48 89 ef 48 89 c6 5b 5d e9 fc fe ff ff e8 f7 f3 ff ff 48 8d b8 08 08 00 00 48 ba 00 7 +[ 41.878925][ T1164] RSP: 0018:ffff8880c4d0f188 EFLAGS: 00010202 +[ 41.879774][ T1164] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000101 +[ 41.887689][ T1164] RDX: dffffc0000000000 RSI: ffffffffb8cf64f0 RDI: 0000000000000808 +[ 41.888727][ T1164] RBP: ffff8880c40e4000 R08: ffffed101b3c0e3c R09: 0000000000000001 +[ 41.889749][ T1164] R10: 0000000000000001 R11: ffffed101b3c0e3b R12: 1ffff110189a1e3c +[ 41.890783][ T1164] R13: ffff8880c4d0f200 R14: ffffffffb8d56160 R15: ffff8880ccc2c000 +[ 41.891794][ T1164] FS: 00007f4300edc0c0(0000) GS:ffff8880d9c00000(0000) knlGS:0000000000000000 +[ 41.892953][ T1164] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 41.893800][ T1164] CR2: 00007f43003bc8c0 CR3: 00000000ca53e001 CR4: 00000000000606f0 +[ 41.894824][ T1164] Call Trace: +[ 41.895274][ T1164] ? rcu_is_watching+0x2c/0x80 +[ 41.895895][ T1164] rmnet_config_notify_cb+0x1f7/0x590 [rmnet] +[ 41.896687][ T1164] ? rmnet_unregister_bridge.isra.6+0xf0/0xf0 [rmnet] +[ 41.897611][ T1164] ? rmnet_unregister_bridge.isra.6+0xf0/0xf0 [rmnet] +[ 41.898508][ T1164] ? __module_text_address+0x13/0x140 +[ 41.899162][ T1164] notifier_call_chain+0x90/0x160 +[ 41.899814][ T1164] rollback_registered_many+0x660/0xcf0 +[ 41.900544][ T1164] ? netif_set_real_num_tx_queues+0x780/0x780 +[ 41.901316][ T1164] ? __lock_acquire+0xdfe/0x3de0 +[ 41.901958][ T1164] ? memset+0x1f/0x40 +[ 41.902468][ T1164] ? __nla_validate_parse+0x98/0x1ab0 +[ 41.903166][ T1164] unregister_netdevice_many.part.133+0x13/0x1b0 +[ 41.903988][ T1164] rtnl_delete_link+0xbc/0x100 +[ ... ] + +Fixes: 60d58f971c10 ("net: qualcomm: rmnet: Implement bridge mode") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + .../ethernet/qualcomm/rmnet/rmnet_config.c | 131 +++++++++--------- + .../ethernet/qualcomm/rmnet/rmnet_config.h | 1 + + .../net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 8 -- + .../net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 1 - + 4 files changed, 64 insertions(+), 77 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index e3fbf2331b965..fbf4cbcf1a654 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -13,25 +13,6 @@ + #include "rmnet_vnd.h" + #include "rmnet_private.h" + +-/* Locking scheme - +- * The shared resource which needs to be protected is realdev->rx_handler_data. +- * For the writer path, this is using rtnl_lock(). The writer paths are +- * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These +- * paths are already called with rtnl_lock() acquired in. There is also an +- * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For +- * dereference here, we will need to use rtnl_dereference(). Dev list writing +- * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). +- * For the reader path, the real_dev->rx_handler_data is called in the TX / RX +- * path. We only need rcu_read_lock() for these scenarios. In these cases, +- * the rcu_read_lock() is held in __dev_queue_xmit() and +- * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() +- * to get the relevant information. For dev list reading, we again acquire +- * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). +- * We also use unregister_netdevice_many() to free all rmnet devices in +- * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in +- * same context. 
+- */ +- + /* Local Definitions and Declarations */ + + static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = { +@@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev) + return rtnl_dereference(real_dev->rx_handler_data); + } + +-static int rmnet_unregister_real_device(struct net_device *real_dev, +- struct rmnet_port *port) ++static int rmnet_unregister_real_device(struct net_device *real_dev) + { ++ struct rmnet_port *port = rmnet_get_port_rtnl(real_dev); ++ + if (port->nr_rmnet_devs) + return -EINVAL; + +@@ -93,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev) + return 0; + } + +-static void rmnet_unregister_bridge(struct net_device *dev, +- struct rmnet_port *port) ++static void rmnet_unregister_bridge(struct rmnet_port *port) + { +- struct rmnet_port *bridge_port; +- struct net_device *bridge_dev; ++ struct net_device *bridge_dev, *real_dev, *rmnet_dev; ++ struct rmnet_port *real_port; + + if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) + return; + +- /* bridge slave handling */ ++ rmnet_dev = port->rmnet_dev; + if (!port->nr_rmnet_devs) { +- bridge_dev = port->bridge_ep; ++ /* bridge device */ ++ real_dev = port->bridge_ep; ++ bridge_dev = port->dev; + +- bridge_port = rmnet_get_port_rtnl(bridge_dev); +- bridge_port->bridge_ep = NULL; +- bridge_port->rmnet_mode = RMNET_EPMODE_VND; ++ real_port = rmnet_get_port_rtnl(real_dev); ++ real_port->bridge_ep = NULL; ++ real_port->rmnet_mode = RMNET_EPMODE_VND; + } else { ++ /* real device */ + bridge_dev = port->bridge_ep; + +- bridge_port = rmnet_get_port_rtnl(bridge_dev); +- rmnet_unregister_real_device(bridge_dev, bridge_port); ++ port->bridge_ep = NULL; ++ port->rmnet_mode = RMNET_EPMODE_VND; + } ++ ++ netdev_upper_dev_unlink(bridge_dev, rmnet_dev); ++ rmnet_unregister_real_device(bridge_dev); + } + + static int rmnet_newlink(struct net *src_net, struct net_device *dev, +@@ -161,6 +148,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + goto err2; + + port->rmnet_mode = mode; ++ port->rmnet_dev = dev; + + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + +@@ -178,8 +166,9 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + + err2: + unregister_netdevice(dev); ++ rmnet_vnd_dellink(mux_id, port, ep); + err1: +- rmnet_unregister_real_device(real_dev, port); ++ rmnet_unregister_real_device(real_dev); + err0: + kfree(ep); + return err; +@@ -188,30 +177,32 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + static void rmnet_dellink(struct net_device *dev, struct list_head *head) + { + struct rmnet_priv *priv = netdev_priv(dev); +- struct net_device *real_dev; ++ struct net_device *real_dev, *bridge_dev; ++ struct rmnet_port *real_port, *bridge_port; + struct rmnet_endpoint *ep; +- struct rmnet_port *port; +- u8 mux_id; ++ u8 mux_id = priv->mux_id; + + real_dev = priv->real_dev; + +- if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) ++ if (!rmnet_is_real_dev_registered(real_dev)) + return; + +- port = rmnet_get_port_rtnl(real_dev); +- +- mux_id = rmnet_vnd_get_mux(dev); ++ real_port = rmnet_get_port_rtnl(real_dev); ++ bridge_dev = real_port->bridge_ep; ++ if (bridge_dev) { ++ bridge_port = rmnet_get_port_rtnl(bridge_dev); ++ rmnet_unregister_bridge(bridge_port); ++ } + +- ep = rmnet_get_endpoint(port, mux_id); ++ ep = rmnet_get_endpoint(real_port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); +- rmnet_unregister_bridge(dev, port); +- rmnet_vnd_dellink(mux_id, port, ep); ++ 
rmnet_vnd_dellink(mux_id, real_port, ep); + kfree(ep); + } +- netdev_upper_dev_unlink(real_dev, dev); +- rmnet_unregister_real_device(real_dev, port); + ++ netdev_upper_dev_unlink(real_dev, dev); ++ rmnet_unregister_real_device(real_dev); + unregister_netdevice_queue(dev, head); + } + +@@ -223,23 +214,23 @@ static void rmnet_force_unassociate_device(struct net_device *real_dev) + unsigned long bkt_ep; + LIST_HEAD(list); + +- ASSERT_RTNL(); +- + port = rmnet_get_port_rtnl(real_dev); + +- rmnet_unregister_bridge(real_dev, port); +- +- hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { +- netdev_upper_dev_unlink(real_dev, ep->egress_dev); +- unregister_netdevice_queue(ep->egress_dev, &list); +- rmnet_vnd_dellink(ep->mux_id, port, ep); +- hlist_del_init_rcu(&ep->hlnode); +- kfree(ep); ++ if (port->nr_rmnet_devs) { ++ /* real device */ ++ rmnet_unregister_bridge(port); ++ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { ++ unregister_netdevice_queue(ep->egress_dev, &list); ++ netdev_upper_dev_unlink(real_dev, ep->egress_dev); ++ rmnet_vnd_dellink(ep->mux_id, port, ep); ++ hlist_del_init_rcu(&ep->hlnode); ++ kfree(ep); ++ } ++ rmnet_unregister_real_device(real_dev); ++ unregister_netdevice_many(&list); ++ } else { ++ rmnet_unregister_bridge(port); + } +- +- unregister_netdevice_many(&list); +- +- rmnet_unregister_real_device(real_dev, port); + } + + static int rmnet_config_notify_cb(struct notifier_block *nb, +@@ -418,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, + if (port->nr_rmnet_devs > 1) + return -EINVAL; + ++ if (port->rmnet_mode != RMNET_EPMODE_VND) ++ return -EINVAL; ++ + if (rmnet_is_real_dev_registered(slave_dev)) + return -EBUSY; + +@@ -425,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, + if (err) + return -EBUSY; + ++ err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, ++ extack); ++ if (err) { ++ rmnet_unregister_real_device(slave_dev); ++ return err; ++ } ++ + slave_port = rmnet_get_port_rtnl(slave_dev); + slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; + slave_port->bridge_ep = real_dev; ++ slave_port->rmnet_dev = rmnet_dev; + + port->rmnet_mode = RMNET_EPMODE_BRIDGE; + port->bridge_ep = slave_dev; +@@ -439,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, + int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev) + { +- struct rmnet_priv *priv = netdev_priv(rmnet_dev); +- struct net_device *real_dev = priv->real_dev; +- struct rmnet_port *port, *slave_port; +- +- port = rmnet_get_port_rtnl(real_dev); +- port->rmnet_mode = RMNET_EPMODE_VND; +- port->bridge_ep = NULL; ++ struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev); + +- slave_port = rmnet_get_port_rtnl(slave_dev); +- rmnet_unregister_real_device(slave_dev, slave_port); ++ rmnet_unregister_bridge(port); + + netdev_dbg(slave_dev, "removed from rmnet as slave\n"); + return 0; +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +index 0d568dcfd65a1..be515982d6286 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +@@ -28,6 +28,7 @@ struct rmnet_port { + u8 rmnet_mode; + struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; + struct net_device *bridge_ep; ++ struct net_device *rmnet_dev; + }; + + extern struct rtnl_link_ops rmnet_link_ops; +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +index 
509dfc895a33e..26ad40f19c64c 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +@@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + return 0; + } + +-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) +-{ +- struct rmnet_priv *priv; +- +- priv = netdev_priv(rmnet_dev); +- return priv->mux_id; +-} +- + int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) + { + netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +index 54cbaf3c3bc43..14d77c709d4ad 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +@@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep); + void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); + void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); +-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev); + void rmnet_vnd_setup(struct net_device *dev); + #endif /* _RMNET_VND_H_ */ +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_chan.patch b/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_chan.patch new file mode 100644 index 00000000000..4ffece1d68c --- /dev/null +++ b/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_chan.patch @@ -0,0 +1,88 @@ +From 4db4484cf3a3ff332cb2743e4014ac1a754e21cf Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:24:26 +0000 +Subject: net: rmnet: fix NULL pointer dereference in rmnet_changelink() + +From: Taehee Yoo + +[ Upstream commit 1eb1f43a6e37282348a41e3d68f5e9a6a4359212 ] + +In the rmnet_changelink(), it uses IFLA_LINK without checking +NULL pointer. +tb[IFLA_LINK] could be NULL pointer. +So, NULL-ptr-deref could occur. + +rmnet already has a lower interface (real_dev). +So, after this patch, rmnet_changelink() does not use IFLA_LINK anymore. 
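A minimal sketch of the safe pattern, assuming only the driver fields and helpers visible in the hunk below (an illustration, not a verbatim copy of the fix): on changelink the IFLA_LINK attribute is optional, so the lower device recorded at newlink time is used rather than dereferencing tb[IFLA_LINK].

	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;

	/* tb[IFLA_LINK] may be absent on changelink; never nla_get_u32() it blindly */
	real_dev = priv->real_dev;
	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;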
+ +Test commands: + modprobe rmnet + ip link add dummy0 type dummy + ip link add rmnet0 link dummy0 type rmnet mux_id 1 + ip link set rmnet0 type rmnet mux_id 2 + +Splat looks like: +[ 90.578726][ T1131] general protection fault, probably for non-canonical address 0xdffffc0000000000I +[ 90.581121][ T1131] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] +[ 90.582380][ T1131] CPU: 2 PID: 1131 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 90.584285][ T1131] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 90.587506][ T1131] RIP: 0010:rmnet_changelink+0x5a/0x8a0 [rmnet] +[ 90.588546][ T1131] Code: 83 ec 20 48 c1 ea 03 80 3c 02 00 0f 85 6f 07 00 00 48 8b 5e 28 48 b8 00 00 00 00 00 0 +[ 90.591447][ T1131] RSP: 0018:ffff8880ce78f1b8 EFLAGS: 00010247 +[ 90.592329][ T1131] RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffff8880ce78f8b0 +[ 90.593253][ T1131] RDX: 0000000000000000 RSI: ffff8880ce78f4a0 RDI: 0000000000000004 +[ 90.594058][ T1131] RBP: ffff8880cf543e00 R08: 0000000000000002 R09: 0000000000000002 +[ 90.594859][ T1131] R10: ffffffffc0586a40 R11: 0000000000000000 R12: ffff8880ca47c000 +[ 90.595690][ T1131] R13: ffff8880ca47c000 R14: ffff8880cf545000 R15: 0000000000000000 +[ 90.596553][ T1131] FS: 00007f21f6c7e0c0(0000) GS:ffff8880da400000(0000) knlGS:0000000000000000 +[ 90.597504][ T1131] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 90.599418][ T1131] CR2: 0000556e413db458 CR3: 00000000c917a002 CR4: 00000000000606e0 +[ 90.600289][ T1131] Call Trace: +[ 90.600631][ T1131] __rtnl_newlink+0x922/0x1270 +[ 90.601194][ T1131] ? lock_downgrade+0x6e0/0x6e0 +[ 90.601724][ T1131] ? rtnl_link_unregister+0x220/0x220 +[ 90.602309][ T1131] ? lock_acquire+0x164/0x3b0 +[ 90.602784][ T1131] ? is_bpf_image_address+0xff/0x1d0 +[ 90.603331][ T1131] ? rtnl_newlink+0x4c/0x90 +[ 90.603810][ T1131] ? kernel_text_address+0x111/0x140 +[ 90.604419][ T1131] ? __kernel_text_address+0xe/0x30 +[ 90.604981][ T1131] ? unwind_get_return_address+0x5f/0xa0 +[ 90.605616][ T1131] ? create_prof_cpu_mask+0x20/0x20 +[ 90.606304][ T1131] ? arch_stack_walk+0x83/0xb0 +[ 90.606985][ T1131] ? stack_trace_save+0x82/0xb0 +[ 90.607656][ T1131] ? stack_trace_consume_entry+0x160/0x160 +[ 90.608503][ T1131] ? deactivate_slab.isra.78+0x2c5/0x800 +[ 90.609336][ T1131] ? kasan_unpoison_shadow+0x30/0x40 +[ 90.610096][ T1131] ? kmem_cache_alloc_trace+0x135/0x350 +[ 90.610889][ T1131] ? rtnl_newlink+0x4c/0x90 +[ 90.611512][ T1131] rtnl_newlink+0x65/0x90 +[ ... ] + +Fixes: 23790ef12082 ("net: qualcomm: rmnet: Allow to configure flags for existing devices") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index 471e3b2a1403a..ac58f584190bd 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -300,10 +300,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + if (!dev) + return -ENODEV; + +- real_dev = __dev_get_by_index(dev_net(dev), +- nla_get_u32(tb[IFLA_LINK])); +- +- if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) ++ real_dev = priv->real_dev; ++ if (!rmnet_is_real_dev_registered(real_dev)) + return -ENODEV; + + port = rmnet_get_port_rtnl(real_dev); +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_newl.patch b/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_newl.patch new file mode 100644 index 00000000000..0ee14d4391b --- /dev/null +++ b/queue-5.5/net-rmnet-fix-null-pointer-dereference-in-rmnet_newl.patch @@ -0,0 +1,82 @@ +From 05721f2ba1c6933479b46899400753dcd554e9db Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:23:52 +0000 +Subject: net: rmnet: fix NULL pointer dereference in rmnet_newlink() + +From: Taehee Yoo + +[ Upstream commit 93b5cbfa9636d385126f211dca9efa7e3f683202 ] + +rmnet registers IFLA_LINK interface as a lower interface. +But, IFLA_LINK could be NULL. +In the current code, rmnet doesn't check IFLA_LINK. +So, panic would occur. + +Test commands: + modprobe rmnet + ip link add rmnet0 type rmnet mux_id 1 + +Splat looks like: +[ 36.826109][ T1115] general protection fault, probably for non-canonical address 0xdffffc0000000000I +[ 36.838817][ T1115] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] +[ 36.839908][ T1115] CPU: 1 PID: 1115 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 36.840569][ T1115] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 36.841408][ T1115] RIP: 0010:rmnet_newlink+0x54/0x510 [rmnet] +[ 36.841986][ T1115] Code: 83 ec 18 48 c1 e9 03 80 3c 01 00 0f 85 d4 03 00 00 48 8b 6a 28 48 b8 00 00 00 00 00 c +[ 36.843923][ T1115] RSP: 0018:ffff8880b7e0f1c0 EFLAGS: 00010247 +[ 36.844756][ T1115] RAX: dffffc0000000000 RBX: ffff8880d14cca00 RCX: 1ffff11016fc1e99 +[ 36.845859][ T1115] RDX: 0000000000000000 RSI: ffff8880c3d04000 RDI: 0000000000000004 +[ 36.846961][ T1115] RBP: 0000000000000000 R08: ffff8880b7e0f8b0 R09: ffff8880b6ac2d90 +[ 36.848020][ T1115] R10: ffffffffc0589a40 R11: ffffed1016d585b7 R12: ffffffff88ceaf80 +[ 36.848788][ T1115] R13: ffff8880c3d04000 R14: ffff8880b7e0f8b0 R15: ffff8880c3d04000 +[ 36.849546][ T1115] FS: 00007f50ab3360c0(0000) GS:ffff8880da000000(0000) knlGS:0000000000000000 +[ 36.851784][ T1115] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 36.852422][ T1115] CR2: 000055871afe5ab0 CR3: 00000000ae246001 CR4: 00000000000606e0 +[ 36.853181][ T1115] Call Trace: +[ 36.853514][ T1115] __rtnl_newlink+0xbdb/0x1270 +[ 36.853967][ T1115] ? lock_downgrade+0x6e0/0x6e0 +[ 36.854420][ T1115] ? rtnl_link_unregister+0x220/0x220 +[ 36.854936][ T1115] ? lock_acquire+0x164/0x3b0 +[ 36.855376][ T1115] ? is_bpf_image_address+0xff/0x1d0 +[ 36.855884][ T1115] ? rtnl_newlink+0x4c/0x90 +[ 36.856304][ T1115] ? kernel_text_address+0x111/0x140 +[ 36.856857][ T1115] ? __kernel_text_address+0xe/0x30 +[ 36.857440][ T1115] ? 
unwind_get_return_address+0x5f/0xa0 +[ 36.858063][ T1115] ? create_prof_cpu_mask+0x20/0x20 +[ 36.858644][ T1115] ? arch_stack_walk+0x83/0xb0 +[ 36.859171][ T1115] ? stack_trace_save+0x82/0xb0 +[ 36.859710][ T1115] ? stack_trace_consume_entry+0x160/0x160 +[ 36.860357][ T1115] ? deactivate_slab.isra.78+0x2c5/0x800 +[ 36.860928][ T1115] ? kasan_unpoison_shadow+0x30/0x40 +[ 36.861520][ T1115] ? kmem_cache_alloc_trace+0x135/0x350 +[ 36.862125][ T1115] ? rtnl_newlink+0x4c/0x90 +[ 36.864073][ T1115] rtnl_newlink+0x65/0x90 +[ ... ] + +Fixes: ceed73a2cf4a ("drivers: net: ethernet: qualcomm: rmnet: Initial implementation") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index 06de59521fc4a..471e3b2a1403a 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -135,6 +135,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + int err = 0; + u16 mux_id; + ++ if (!tb[IFLA_LINK]) { ++ NL_SET_ERR_MSG_MOD(extack, "link not specified"); ++ return -EINVAL; ++ } ++ + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!real_dev || !dev) + return -ENODEV; +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch b/queue-5.5/net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch new file mode 100644 index 00000000000..40e66beb5d2 --- /dev/null +++ b/queue-5.5/net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch @@ -0,0 +1,61 @@ +From a41655352117202382acd4edd21503e068ce7d95 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:26:15 +0000 +Subject: net: rmnet: fix packet forwarding in rmnet bridge mode + +From: Taehee Yoo + +[ Upstream commit ad3cc31b599ea80f06b29ebdc18b3a39878a48d6 ] + +Packet forwarding is not working in rmnet bridge mode. +Because when a packet is forwarded, skb_push() for an ethernet header +is needed. But it doesn't call skb_push(). +So, the ethernet header will be lost. + +Test commands: + modprobe rmnet + ip netns add nst + ip netns add nst2 + ip link add veth0 type veth peer name veth1 + ip link add veth2 type veth peer name veth3 + ip link set veth1 netns nst + ip link set veth3 netns nst2 + + ip link add rmnet0 link veth0 type rmnet mux_id 1 + ip link set veth2 master rmnet0 + ip link set veth0 up + ip link set veth2 up + ip link set rmnet0 up + ip a a 192.168.100.1/24 dev rmnet0 + + ip netns exec nst ip link set veth1 up + ip netns exec nst ip a a 192.168.100.2/24 dev veth1 + ip netns exec nst2 ip link set veth3 up + ip netns exec nst2 ip a a 192.168.100.3/24 dev veth3 + ip netns exec nst2 ping 192.168.100.2 + +Fixes: 60d58f971c10 ("net: qualcomm: rmnet: Implement bridge mode") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +index 074a8b326c304..29a7bfa2584dc 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +@@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, + static void + rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) + { ++ if (skb_mac_header_was_set(skb)) ++ skb_push(skb, skb->mac_len); ++ + if (bridge_dev) { + skb->dev = bridge_dev; + dev_queue_xmit(skb); +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-fix-suspicious-rcu-usage.patch b/queue-5.5/net-rmnet-fix-suspicious-rcu-usage.patch new file mode 100644 index 00000000000..75ce4b064c3 --- /dev/null +++ b/queue-5.5/net-rmnet-fix-suspicious-rcu-usage.patch @@ -0,0 +1,163 @@ +From 2cbea8f0f09c42b0d428518b4897fde13e9ffa26 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:24:45 +0000 +Subject: net: rmnet: fix suspicious RCU usage + +From: Taehee Yoo + +[ Upstream commit 102210f7664442d8c0ce332c006ea90626df745b ] + +rmnet_get_port() internally calls rcu_dereference_rtnl(), +which checks RTNL. +But rmnet_get_port() could be called by packet path. +The packet path is not protected by RTNL. +So, the suspicious RCU usage problem occurs. + +Test commands: + modprobe rmnet + ip netns add nst + ip link add veth0 type veth peer name veth1 + ip link set veth1 netns nst + ip link add rmnet0 link veth0 type rmnet mux_id 1 + ip netns exec nst ip link add rmnet1 link veth1 type rmnet mux_id 1 + ip netns exec nst ip link set veth1 up + ip netns exec nst ip link set rmnet1 up + ip netns exec nst ip a a 192.168.100.2/24 dev rmnet1 + ip link set veth0 up + ip link set rmnet0 up + ip a a 192.168.100.1/24 dev rmnet0 + ping 192.168.100.2 + +Splat looks like: +[ 146.630958][ T1174] WARNING: suspicious RCU usage +[ 146.631735][ T1174] 5.6.0-rc1+ #447 Not tainted +[ 146.632387][ T1174] ----------------------------- +[ 146.633151][ T1174] drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c:386 suspicious rcu_dereference_check() ! +[ 146.634742][ T1174] +[ 146.634742][ T1174] other info that might help us debug this: +[ 146.634742][ T1174] +[ 146.645992][ T1174] +[ 146.645992][ T1174] rcu_scheduler_active = 2, debug_locks = 1 +[ 146.646937][ T1174] 5 locks held by ping/1174: +[ 146.647609][ T1174] #0: ffff8880c31dea70 (sk_lock-AF_INET){+.+.}, at: raw_sendmsg+0xab8/0x2980 +[ 146.662463][ T1174] #1: ffffffff93925660 (rcu_read_lock_bh){....}, at: ip_finish_output2+0x243/0x2150 +[ 146.671696][ T1174] #2: ffffffff93925660 (rcu_read_lock_bh){....}, at: __dev_queue_xmit+0x213/0x2940 +[ 146.673064][ T1174] #3: ffff8880c19ecd58 (&dev->qdisc_running_key#7){+...}, at: ip_finish_output2+0x714/0x2150 +[ 146.690358][ T1174] #4: ffff8880c5796898 (&dev->qdisc_xmit_lock_key#3){+.-.}, at: sch_direct_xmit+0x1e2/0x1020 +[ 146.699875][ T1174] +[ 146.699875][ T1174] stack backtrace: +[ 146.701091][ T1174] CPU: 0 PID: 1174 Comm: ping Not tainted 5.6.0-rc1+ #447 +[ 146.705215][ T1174] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 146.706565][ T1174] Call Trace: +[ 146.707102][ T1174] dump_stack+0x96/0xdb +[ 146.708007][ T1174] rmnet_get_port.part.9+0x76/0x80 [rmnet] +[ 146.709233][ T1174] rmnet_egress_handler+0x107/0x420 [rmnet] +[ 146.710492][ T1174] ? 
sch_direct_xmit+0x1e2/0x1020 +[ 146.716193][ T1174] rmnet_vnd_start_xmit+0x3d/0xa0 [rmnet] +[ 146.717012][ T1174] dev_hard_start_xmit+0x160/0x740 +[ 146.717854][ T1174] sch_direct_xmit+0x265/0x1020 +[ 146.718577][ T1174] ? register_lock_class+0x14d0/0x14d0 +[ 146.719429][ T1174] ? dev_watchdog+0xac0/0xac0 +[ 146.723738][ T1174] ? __dev_queue_xmit+0x15fd/0x2940 +[ 146.724469][ T1174] ? lock_acquire+0x164/0x3b0 +[ 146.725172][ T1174] __dev_queue_xmit+0x20c7/0x2940 +[ ... ] + +Fixes: ceed73a2cf4a ("drivers: net: ethernet: qualcomm: rmnet: Initial implementation") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 13 ++++++------- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 2 +- + .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 4 ++-- + 3 files changed, 9 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index ac58f584190bd..fc68ecdd804bc 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -382,11 +382,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = { + .fill_info = rmnet_fill_info, + }; + +-/* Needs either rcu_read_lock() or rtnl lock */ +-struct rmnet_port *rmnet_get_port(struct net_device *real_dev) ++struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev) + { + if (rmnet_is_real_dev_registered(real_dev)) +- return rcu_dereference_rtnl(real_dev->rx_handler_data); ++ return rcu_dereference_bh(real_dev->rx_handler_data); + else + return NULL; + } +@@ -412,7 +411,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, + struct rmnet_port *port, *slave_port; + int err; + +- port = rmnet_get_port(real_dev); ++ port = rmnet_get_port_rtnl(real_dev); + + /* If there is more than one rmnet dev attached, its probably being + * used for muxing. 
Skip the briding in that case +@@ -427,7 +426,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, + if (err) + return -EBUSY; + +- slave_port = rmnet_get_port(slave_dev); ++ slave_port = rmnet_get_port_rtnl(slave_dev); + slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; + slave_port->bridge_ep = real_dev; + +@@ -445,11 +444,11 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *real_dev = priv->real_dev; + struct rmnet_port *port, *slave_port; + +- port = rmnet_get_port(real_dev); ++ port = rmnet_get_port_rtnl(real_dev); + port->rmnet_mode = RMNET_EPMODE_VND; + port->bridge_ep = NULL; + +- slave_port = rmnet_get_port(slave_dev); ++ slave_port = rmnet_get_port_rtnl(slave_dev); + rmnet_unregister_real_device(slave_dev, slave_port); + + netdev_dbg(slave_dev, "removed from rmnet as slave\n"); +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +index cd0a6bcbe74ad..0d568dcfd65a1 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +@@ -65,7 +65,7 @@ struct rmnet_priv { + struct rmnet_priv_stats stats; + }; + +-struct rmnet_port *rmnet_get_port(struct net_device *real_dev); ++struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev); + struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); + int rmnet_add_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev, +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +index 1b74bc1604027..074a8b326c304 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +@@ -184,7 +184,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) + return RX_HANDLER_PASS; + + dev = skb->dev; +- port = rmnet_get_port(dev); ++ port = rmnet_get_port_rcu(dev); + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: +@@ -217,7 +217,7 @@ void rmnet_egress_handler(struct sk_buff *skb) + skb->dev = priv->real_dev; + mux_id = priv->mux_id; + +- port = rmnet_get_port(skb->dev); ++ port = rmnet_get_port_rcu(skb->dev); + if (!port) + goto drop; + +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-remove-rcu_read_lock-in-rmnet_force_unasso.patch b/queue-5.5/net-rmnet-remove-rcu_read_lock-in-rmnet_force_unasso.patch new file mode 100644 index 00000000000..fe0f9e16efc --- /dev/null +++ b/queue-5.5/net-rmnet-remove-rcu_read_lock-in-rmnet_force_unasso.patch @@ -0,0 +1,95 @@ +From 825bad7a84810de72a811595f50834851d097140 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:25:05 +0000 +Subject: net: rmnet: remove rcu_read_lock in rmnet_force_unassociate_device() + +From: Taehee Yoo + +[ Upstream commit c026d970102e9af9958edefb4a015702c6aab636 ] + +The notifier_call() of the slave interface removes rmnet interface with +unregister_netdevice_queue(). +But, before calling unregister_netdevice_queue(), it acquires +rcu readlock. +In the RCU critical section, sleeping isn't be allowed. +But, unregister_netdevice_queue() internally calls synchronize_net(), +which would sleep. +So, suspicious RCU usage warning occurs. 
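A minimal sketch of the illegal pattern described above, built only from functions named in the commit message and the splat (an illustration, not the driver code verbatim): the teardown path reaches synchronize_net(), which may sleep, so it must not run under rcu_read_lock(); RTNL is already held on this path, so the read-side lock adds nothing and the patch simply drops it.

	rcu_read_lock();
	/* netdev_rx_handler_unregister() -> synchronize_net(), which may sleep */
	netdev_rx_handler_unregister(real_dev);
	rcu_read_unlock();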
+ +Test commands: + modprobe rmnet + ip link add dummy0 type dummy + ip link add dummy1 type dummy + ip link add rmnet0 link dummy0 type rmnet mux_id 1 + ip link set dummy1 master rmnet0 + ip link del dummy0 + +Splat looks like: +[ 79.639245][ T1195] ============================= +[ 79.640134][ T1195] WARNING: suspicious RCU usage +[ 79.640852][ T1195] 5.6.0-rc1+ #447 Not tainted +[ 79.641657][ T1195] ----------------------------- +[ 79.642472][ T1195] ./include/linux/rcupdate.h:273 Illegal context switch in RCU read-side critical section! +[ 79.644043][ T1195] +[ 79.644043][ T1195] other info that might help us debug this: +[ 79.644043][ T1195] +[ 79.645682][ T1195] +[ 79.645682][ T1195] rcu_scheduler_active = 2, debug_locks = 1 +[ 79.646980][ T1195] 2 locks held by ip/1195: +[ 79.647629][ T1195] #0: ffffffffa3cf64f0 (rtnl_mutex){+.+.}, at: rtnetlink_rcv_msg+0x457/0x890 +[ 79.649312][ T1195] #1: ffffffffa39256c0 (rcu_read_lock){....}, at: rmnet_config_notify_cb+0xf0/0x590 [rmnet] +[ 79.651717][ T1195] +[ 79.651717][ T1195] stack backtrace: +[ 79.652650][ T1195] CPU: 3 PID: 1195 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 79.653702][ T1195] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 79.655037][ T1195] Call Trace: +[ 79.655560][ T1195] dump_stack+0x96/0xdb +[ 79.656252][ T1195] ___might_sleep+0x345/0x440 +[ 79.656994][ T1195] synchronize_net+0x18/0x30 +[ 79.661132][ T1195] netdev_rx_handler_unregister+0x40/0xb0 +[ 79.666266][ T1195] rmnet_unregister_real_device+0x42/0xb0 [rmnet] +[ 79.667211][ T1195] rmnet_config_notify_cb+0x1f7/0x590 [rmnet] +[ 79.668121][ T1195] ? rmnet_unregister_bridge.isra.6+0xf0/0xf0 [rmnet] +[ 79.669166][ T1195] ? rmnet_unregister_bridge.isra.6+0xf0/0xf0 [rmnet] +[ 79.670286][ T1195] ? __module_text_address+0x13/0x140 +[ 79.671139][ T1195] notifier_call_chain+0x90/0x160 +[ 79.671973][ T1195] rollback_registered_many+0x660/0xcf0 +[ 79.672893][ T1195] ? netif_set_real_num_tx_queues+0x780/0x780 +[ 79.675091][ T1195] ? __lock_acquire+0xdfe/0x3de0 +[ 79.675825][ T1195] ? memset+0x1f/0x40 +[ 79.676367][ T1195] ? __nla_validate_parse+0x98/0x1ab0 +[ 79.677290][ T1195] unregister_netdevice_many.part.133+0x13/0x1b0 +[ 79.678163][ T1195] rtnl_delete_link+0xbc/0x100 +[ ... ] + +Fixes: ceed73a2cf4a ("drivers: net: ethernet: qualcomm: rmnet: Initial implementation") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index fc68ecdd804bc..0ad64aa665925 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -230,7 +230,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev) + + port = rmnet_get_port_rtnl(dev); + +- rcu_read_lock(); + rmnet_unregister_bridge(dev, port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { +@@ -241,7 +240,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev) + kfree(ep); + } + +- rcu_read_unlock(); + unregister_netdevice_many(&list); + + rmnet_unregister_real_device(real_dev, port); +-- +2.20.1 + diff --git a/queue-5.5/net-rmnet-use-upper-lower-device-infrastructure.patch b/queue-5.5/net-rmnet-use-upper-lower-device-infrastructure.patch new file mode 100644 index 00000000000..0dca5e932d7 --- /dev/null +++ b/queue-5.5/net-rmnet-use-upper-lower-device-infrastructure.patch @@ -0,0 +1,199 @@ +From 0f651d1210d71a6346b0a5ac13fcdb84c860143e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 27 Feb 2020 12:25:43 +0000 +Subject: net: rmnet: use upper/lower device infrastructure + +From: Taehee Yoo + +[ Upstream commit 037f9cdf72fb8a7ff9ec2b5dd05336ec1492bdf1 ] + +netdev_upper_dev_link() is useful to manage lower/upper interfaces. +And this function internally validates looping, maximum depth. +All or most virtual interfaces that could have a real interface +(e.g. macsec, macvlan, ipvlan etc.) use lower/upper infrastructure. + +Test commands: + modprobe rmnet + ip link add dummy0 type dummy + ip link add rmnet1 link dummy0 type rmnet mux_id 1 + for i in {2..100} + do + let A=$i-1 + ip link add rmnet$i link rmnet$A type rmnet mux_id $i + done + ip link del dummy0 + +The purpose of the test commands is to make stack overflow. 
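A rough sketch of the mechanism (assumed typical usage; the exact hunks follow below, and err_unwind stands in for the driver's unwind path): linking the rmnet device as an upper device of its lower device lets the core reject loops and excessive stacking depth at setup time, so the recursive configuration above fails with an error instead of overflowing the stack.

	/* at newlink: real_dev is the lower device, dev the new rmnet device */
	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto err_unwind;

	/* at dellink, symmetrically */
	netdev_upper_dev_unlink(real_dev, dev);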
+ +Splat looks like: +[ 52.411438][ T1395] BUG: KASAN: slab-out-of-bounds in find_busiest_group+0x27e/0x2c00 +[ 52.413218][ T1395] Write of size 64 at addr ffff8880c774bde0 by task ip/1395 +[ 52.414841][ T1395] +[ 52.430720][ T1395] CPU: 1 PID: 1395 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 52.496511][ T1395] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 52.513597][ T1395] Call Trace: +[ 52.546516][ T1395] +[ 52.558773][ T1395] Allocated by task 3171537984: +[ 52.588290][ T1395] BUG: unable to handle page fault for address: ffffffffb999e260 +[ 52.589311][ T1395] #PF: supervisor read access in kernel mode +[ 52.590529][ T1395] #PF: error_code(0x0000) - not-present page +[ 52.591374][ T1395] PGD d6818067 P4D d6818067 PUD d6819063 PMD 0 +[ 52.592288][ T1395] Thread overran stack, or stack corrupted +[ 52.604980][ T1395] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI +[ 52.605856][ T1395] CPU: 1 PID: 1395 Comm: ip Not tainted 5.6.0-rc1+ #447 +[ 52.611764][ T1395] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +[ 52.621520][ T1395] RIP: 0010:stack_depot_fetch+0x10/0x30 +[ 52.622296][ T1395] Code: ff e9 f9 fe ff ff 48 89 df e8 9c 1d 91 ff e9 ca fe ff ff cc cc cc cc cc cc cc 89 f8 0 +[ 52.627887][ T1395] RSP: 0018:ffff8880c774bb60 EFLAGS: 00010006 +[ 52.628735][ T1395] RAX: 00000000001f8880 RBX: ffff8880c774d140 RCX: 0000000000000000 +[ 52.631773][ T1395] RDX: 000000000000001d RSI: ffff8880c774bb68 RDI: 0000000000003ff0 +[ 52.649584][ T1395] RBP: ffffea00031dd200 R08: ffffed101b43e403 R09: ffffed101b43e403 +[ 52.674857][ T1395] R10: 0000000000000001 R11: ffffed101b43e402 R12: ffff8880d900e5c0 +[ 52.678257][ T1395] R13: ffff8880c774c000 R14: 0000000000000000 R15: dffffc0000000000 +[ 52.694541][ T1395] FS: 00007fe867f6e0c0(0000) GS:ffff8880da000000(0000) knlGS:0000000000000000 +[ 52.764039][ T1395] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 52.815008][ T1395] CR2: ffffffffb999e260 CR3: 00000000c26aa005 CR4: 00000000000606e0 +[ 52.862312][ T1395] Call Trace: +[ 52.887133][ T1395] Modules linked in: dummy rmnet veth openvswitch nsh nf_conncount nf_nat nf_conntrack nf_dex +[ 52.936749][ T1395] CR2: ffffffffb999e260 +[ 52.965695][ T1395] ---[ end trace 7e32ca99482dbb31 ]--- +[ 52.966556][ T1395] RIP: 0010:stack_depot_fetch+0x10/0x30 +[ 52.971083][ T1395] Code: ff e9 f9 fe ff ff 48 89 df e8 9c 1d 91 ff e9 ca fe ff ff cc cc cc cc cc cc cc 89 f8 0 +[ 53.003650][ T1395] RSP: 0018:ffff8880c774bb60 EFLAGS: 00010006 +[ 53.043183][ T1395] RAX: 00000000001f8880 RBX: ffff8880c774d140 RCX: 0000000000000000 +[ 53.076480][ T1395] RDX: 000000000000001d RSI: ffff8880c774bb68 RDI: 0000000000003ff0 +[ 53.093858][ T1395] RBP: ffffea00031dd200 R08: ffffed101b43e403 R09: ffffed101b43e403 +[ 53.112795][ T1395] R10: 0000000000000001 R11: ffffed101b43e402 R12: ffff8880d900e5c0 +[ 53.139837][ T1395] R13: ffff8880c774c000 R14: 0000000000000000 R15: dffffc0000000000 +[ 53.141500][ T1395] FS: 00007fe867f6e0c0(0000) GS:ffff8880da000000(0000) knlGS:0000000000000000 +[ 53.143343][ T1395] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 53.152007][ T1395] CR2: ffffffffb999e260 CR3: 00000000c26aa005 CR4: 00000000000606e0 +[ 53.156459][ T1395] Kernel panic - not syncing: Fatal exception +[ 54.213570][ T1395] Shutting down cpus with NMI +[ 54.354112][ T1395] Kernel Offset: 0x33000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0x) +[ 54.355687][ T1395] Rebooting in 5 seconds.. 
+ +Fixes: b37f78f234bf ("net: qualcomm: rmnet: Fix crash on real dev unregistration") +Signed-off-by: Taehee Yoo +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + .../ethernet/qualcomm/rmnet/rmnet_config.c | 35 +++++++++---------- + 1 file changed, 16 insertions(+), 19 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +index 3c0e6d24d0834..e3fbf2331b965 100644 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +@@ -61,9 +61,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, + + kfree(port); + +- /* release reference on real_dev */ +- dev_put(real_dev); +- + netdev_dbg(real_dev, "Removed from rmnet\n"); + return 0; + } +@@ -89,9 +86,6 @@ static int rmnet_register_real_device(struct net_device *real_dev) + return -EBUSY; + } + +- /* hold on to real dev for MAP data */ +- dev_hold(real_dev); +- + for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) + INIT_HLIST_HEAD(&port->muxed_ep[entry]); + +@@ -162,6 +156,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + if (err) + goto err1; + ++ err = netdev_upper_dev_link(real_dev, dev, extack); ++ if (err < 0) ++ goto err2; ++ + port->rmnet_mode = mode; + + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); +@@ -178,6 +176,8 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, + + return 0; + ++err2: ++ unregister_netdevice(dev); + err1: + rmnet_unregister_real_device(real_dev, port); + err0: +@@ -209,33 +209,30 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) + rmnet_vnd_dellink(mux_id, port, ep); + kfree(ep); + } ++ netdev_upper_dev_unlink(real_dev, dev); + rmnet_unregister_real_device(real_dev, port); + + unregister_netdevice_queue(dev, head); + } + +-static void rmnet_force_unassociate_device(struct net_device *dev) ++static void rmnet_force_unassociate_device(struct net_device *real_dev) + { +- struct net_device *real_dev = dev; + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + unsigned long bkt_ep; + LIST_HEAD(list); + +- if (!rmnet_is_real_dev_registered(real_dev)) +- return; +- + ASSERT_RTNL(); + +- port = rmnet_get_port_rtnl(dev); ++ port = rmnet_get_port_rtnl(real_dev); + +- rmnet_unregister_bridge(dev, port); ++ rmnet_unregister_bridge(real_dev, port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { ++ netdev_upper_dev_unlink(real_dev, ep->egress_dev); + unregister_netdevice_queue(ep->egress_dev, &list); + rmnet_vnd_dellink(ep->mux_id, port, ep); +- + hlist_del_init_rcu(&ep->hlnode); + kfree(ep); + } +@@ -248,15 +245,15 @@ static void rmnet_force_unassociate_device(struct net_device *dev) + static int rmnet_config_notify_cb(struct notifier_block *nb, + unsigned long event, void *data) + { +- struct net_device *dev = netdev_notifier_info_to_dev(data); ++ struct net_device *real_dev = netdev_notifier_info_to_dev(data); + +- if (!dev) ++ if (!rmnet_is_real_dev_registered(real_dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: +- netdev_dbg(dev, "Kernel unregister\n"); +- rmnet_force_unassociate_device(dev); ++ netdev_dbg(real_dev, "Kernel unregister\n"); ++ rmnet_force_unassociate_device(real_dev); + break; + + default: +@@ -477,8 +474,8 @@ static int __init rmnet_init(void) + + static void __exit rmnet_exit(void) + { +- unregister_netdevice_notifier(&rmnet_dev_notifier); + 
rtnl_link_unregister(&rmnet_link_ops); ++ unregister_netdevice_notifier(&rmnet_dev_notifier); + } + + module_init(rmnet_init) +-- +2.20.1 + diff --git a/queue-5.5/net-usb-qmi_wwan-restore-mtu-min-max-values-after-ra.patch b/queue-5.5/net-usb-qmi_wwan-restore-mtu-min-max-values-after-ra.patch new file mode 100644 index 00000000000..30b755f0f3c --- /dev/null +++ b/queue-5.5/net-usb-qmi_wwan-restore-mtu-min-max-values-after-ra.patch @@ -0,0 +1,51 @@ +From 2fcb6b1b5992c4a2cd882726bb5f81d06f1aa342 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 21 Feb 2020 14:17:05 +0100 +Subject: net: usb: qmi_wwan: restore mtu min/max values after raw_ip switch +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Daniele Palmas + +[ Upstream commit eae7172f8141eb98e64e6e81acc9e9d5b2add127 ] + +usbnet creates network interfaces with min_mtu = 0 and +max_mtu = ETH_MAX_MTU. + +These values are not modified by qmi_wwan when the network interface +is created initially, allowing, for example, to set mtu greater than 1500. + +When a raw_ip switch is done (raw_ip set to 'Y', then set to 'N') the mtu +values for the network interface are set through ether_setup, with +min_mtu = ETH_MIN_MTU and max_mtu = ETH_DATA_LEN, not allowing anymore to +set mtu greater than 1500 (error: mtu greater than device maximum). + +The patch restores the original min/max mtu values set by usbnet after a +raw_ip switch. + +Signed-off-by: Daniele Palmas +Acked-by: Bjørn Mork +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/usb/qmi_wwan.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 3b7a3b8a5e067..5754bb6ca0eec 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -337,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net) + netdev_dbg(net, "mode: raw IP\n"); + } else if (!net->header_ops) { /* don't bother if already set */ + ether_setup(net); ++ /* Restoring min/max mtu values set originally by usbnet */ ++ net->min_mtu = 0; ++ net->max_mtu = ETH_MAX_MTU; + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); + netdev_dbg(net, "mode: Ethernet\n"); + } +-- +2.20.1 + diff --git a/queue-5.5/riscv-set-pmp-configuration-if-kernel-is-running-in-.patch b/queue-5.5/riscv-set-pmp-configuration-if-kernel-is-running-in-.patch new file mode 100644 index 00000000000..7d2e8a5b509 --- /dev/null +++ b/queue-5.5/riscv-set-pmp-configuration-if-kernel-is-running-in-.patch @@ -0,0 +1,75 @@ +From ce4feedb4b2a36d47cbacc70a7c1432f9d36deea Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Jan 2020 11:17:40 +0800 +Subject: riscv: set pmp configuration if kernel is running in M-mode + +From: Greentime Hu + +[ Upstream commit c68a9032299e837b56d356de9250c93094f7e0e3 ] + +When the kernel is running in S-mode, the expectation is that the +bootloader or SBI layer will configure the PMP to allow the kernel to +access physical memory. But, when the kernel is running in M-mode and is +started with the ELF "loader", there's probably no bootloader or SBI layer +involved to configure the PMP. Thus, we need to configure the PMP +ourselves to enable the kernel to access all regions. 
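A small standalone illustration of the NAPOT encoding that the new head.S code relies on (an editor's sketch in host C, not kernel code; the size rule is the RISC-V privileged spec's 2^(k+3) bytes for k trailing one bits): writing all-ones to pmpaddr0, as the diff below does, therefore selects the largest possible region, i.e. all of physical memory.

	#include <stdint.h>
	#include <stdio.h>

	/* Size in bytes of a NAPOT PMP region for a given pmpaddr value:
	 * k trailing 1 bits -> 2^(k+3) bytes; all-ones -> whole address space.
	 */
	static uint64_t napot_region_size(uint64_t pmpaddr)
	{
		unsigned int ones = 0;

		while ((pmpaddr & 1) && ones <= 60) {
			ones++;
			pmpaddr >>= 1;
		}
		if (ones > 60)
			return 0;	/* too large to represent: effectively all of memory */
		return 1ULL << (ones + 3);
	}

	int main(void)
	{
		printf("pmpaddr 0x1f -> %llu-byte region\n",
		       (unsigned long long)napot_region_size(0x1f));	/* 256 bytes */
		printf("pmpaddr ~0   -> %llu (whole address space)\n",
		       (unsigned long long)napot_region_size(~0ULL));
		return 0;
	}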
+ +Signed-off-by: Greentime Hu +Reviewed-by: Palmer Dabbelt +Signed-off-by: Palmer Dabbelt +Signed-off-by: Sasha Levin +--- + arch/riscv/include/asm/csr.h | 12 ++++++++++++ + arch/riscv/kernel/head.S | 6 ++++++ + 2 files changed, 18 insertions(+) + +diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h +index 435b65532e294..8e18d2c64399d 100644 +--- a/arch/riscv/include/asm/csr.h ++++ b/arch/riscv/include/asm/csr.h +@@ -72,6 +72,16 @@ + #define EXC_LOAD_PAGE_FAULT 13 + #define EXC_STORE_PAGE_FAULT 15 + ++/* PMP configuration */ ++#define PMP_R 0x01 ++#define PMP_W 0x02 ++#define PMP_X 0x04 ++#define PMP_A 0x18 ++#define PMP_A_TOR 0x08 ++#define PMP_A_NA4 0x10 ++#define PMP_A_NAPOT 0x18 ++#define PMP_L 0x80 ++ + /* symbolic CSR names: */ + #define CSR_CYCLE 0xc00 + #define CSR_TIME 0xc01 +@@ -100,6 +110,8 @@ + #define CSR_MCAUSE 0x342 + #define CSR_MTVAL 0x343 + #define CSR_MIP 0x344 ++#define CSR_PMPCFG0 0x3a0 ++#define CSR_PMPADDR0 0x3b0 + #define CSR_MHARTID 0xf14 + + #ifdef CONFIG_RISCV_M_MODE +diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S +index a4242be66966b..e4d9baf973232 100644 +--- a/arch/riscv/kernel/head.S ++++ b/arch/riscv/kernel/head.S +@@ -58,6 +58,12 @@ _start_kernel: + /* Reset all registers except ra, a0, a1 */ + call reset_regs + ++ /* Setup a PMP to permit access to all of memory. */ ++ li a0, -1 ++ csrw CSR_PMPADDR0, a0 ++ li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) ++ csrw CSR_PMPCFG0, a0 ++ + /* + * The hartid in a0 is expected later on, and we have no firmware + * to hand it to us. +-- +2.20.1 + diff --git a/queue-5.5/scsi-libfc-free-response-frame-from-gpn_id.patch b/queue-5.5/scsi-libfc-free-response-frame-from-gpn_id.patch new file mode 100644 index 00000000000..84fb670c3d9 --- /dev/null +++ b/queue-5.5/scsi-libfc-free-response-frame-from-gpn_id.patch @@ -0,0 +1,37 @@ +From f29bce26f7210f10152dbd4b48d623d452f35236 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Jan 2020 14:43:19 +0000 +Subject: scsi: libfc: free response frame from GPN_ID + +From: Igor Druzhinin + +[ Upstream commit ff6993bb79b9f99bdac0b5378169052931b65432 ] + +fc_disc_gpn_id_resp() should be the last function using it so free it here +to avoid memory leak. + +Link: https://lore.kernel.org/r/1579013000-14570-2-git-send-email-igor.druzhinin@citrix.com +Reviewed-by: Hannes Reinecke +Signed-off-by: Igor Druzhinin +Signed-off-by: Martin K. Petersen +Signed-off-by: Sasha Levin +--- + drivers/scsi/libfc/fc_disc.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c +index 9c5f7c9178c66..2b865c6423e29 100644 +--- a/drivers/scsi/libfc/fc_disc.c ++++ b/drivers/scsi/libfc/fc_disc.c +@@ -628,6 +628,8 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + } + out: + kref_put(&rdata->kref, fc_rport_destroy); ++ if (!IS_ERR(fp)) ++ fc_frame_free(fp); + } + + /** +-- +2.20.1 + diff --git a/queue-5.5/selftests-rseq-fix-out-of-tree-compilation.patch b/queue-5.5/selftests-rseq-fix-out-of-tree-compilation.patch new file mode 100644 index 00000000000..1c9f4eb2ba7 --- /dev/null +++ b/queue-5.5/selftests-rseq-fix-out-of-tree-compilation.patch @@ -0,0 +1,48 @@ +From 5188655c8d94699248540262f0b2e8f30de03b6c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 20 Feb 2020 22:37:48 +1100 +Subject: selftests/rseq: Fix out-of-tree compilation + +From: Michael Ellerman + +[ Upstream commit ef89d0545132d685f73da6f58b7e7fe002536f91 ] + +Currently if you build with O=... 
the rseq tests don't build: + + $ make O=$PWD/output -C tools/testing/selftests/ TARGETS=rseq + make: Entering directory '/linux/tools/testing/selftests' + ... + make[1]: Entering directory '/linux/tools/testing/selftests/rseq' + gcc -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ -shared -fPIC rseq.c -lpthread -o /linux/output/rseq/librseq.so + gcc -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ basic_test.c -lpthread -lrseq -o /linux/output/rseq/basic_test + /usr/bin/ld: cannot find -lrseq + collect2: error: ld returned 1 exit status + +This is because the library search path points to the source +directory, not the output. + +We can fix it by changing the library search path to $(OUTPUT). + +Signed-off-by: Michael Ellerman +Signed-off-by: Shuah Khan +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/rseq/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile +index f1053630bb6f5..2af9d39a97168 100644 +--- a/tools/testing/selftests/rseq/Makefile ++++ b/tools/testing/selftests/rseq/Makefile +@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) + CLANG_FLAGS += -no-integrated-as + endif + +-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \ ++CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ + $(CLANG_FLAGS) + LDLIBS += -lpthread + +-- +2.20.1 + diff --git a/queue-5.5/series b/queue-5.5/series index 460c886460a..ed2fc8c5db6 100644 --- a/queue-5.5/series +++ b/queue-5.5/series @@ -6,3 +6,52 @@ mmc-sdhci-tegra-fix-busy-detection-by-enabling-mmc_c.patch mmc-sdhci-omap-fix-busy-detection-by-enabling-mmc_ca.patch mmc-core-respect-mmc_cap_need_rsp_busy-for-emmc-slee.patch mmc-core-respect-mmc_cap_need_rsp_busy-for-erase-tri.patch +acpi-watchdog-allow-disabling-wdat-at-boot.patch +hid-apple-add-support-for-recent-firmware-on-magic-k.patch +acpi-watchdog-set-default-timeout-in-probe.patch +hid-i2c-hid-add-trekstor-surfbook-e11b-to-descriptor.patch +mips-vdso-fix-jalr-t9-crash-in-vdso-code.patch +mips-disable-vdso-time-functionality-on-micromips.patch +mips-vdso-add-build-time-check-that-no-jalr-t9-calls.patch +hid-hid-bigbenff-fix-general-protection-fault-caused.patch +hid-hid-bigbenff-call-hid_hw_stop-in-case-of-error.patch +hid-hid-bigbenff-fix-race-condition-for-scheduled-wo.patch +riscv-set-pmp-configuration-if-kernel-is-running-in-.patch +mips-vdso-wrap-mexplicit-relocs-in-cc-option.patch +kunit-run-kunit_tool-from-any-directory.patch +selftests-rseq-fix-out-of-tree-compilation.patch +tracing-fix-number-printing-bug-in-print_synth_event.patch +cfg80211-check-reg_rule-for-null-in-handle_channel_c.patch +scsi-libfc-free-response-frame-from-gpn_id.patch +net-usb-qmi_wwan-restore-mtu-min-max-values-after-ra.patch +net-ks8851-ml-fix-irq-handling-and-locking.patch +mac80211-rx-avoid-rcu-list-traversal-under-mutex.patch +net-ll_temac-fix-race-condition-causing-tx-hang.patch +net-ll_temac-add-more-error-handling-of-dma_map_sing.patch +net-ll_temac-fix-rx-buffer-descriptor-handling-on-gf.patch +net-ll_temac-handle-dma-halt-condition-caused-by-buf.patch +blk-mq-insert-passthrough-request-into-hctx-dispatch.patch +io_uring-fix-poll_list-race-for-setup_iopoll-setup_s.patch +drm-amdgpu-fix-memory-leak-during-tdr-test-v2.patch +io_uring-pick-up-link-work-on-submit-reference-drop.patch +kbuild-add-dtbs_check-to-phony.patch +kbuild-add-dt_binding_check-to-phony-in-a-correct-pl.patch 
+signal-avoid-double-atomic-counter-increments-for-us.patch +net-bcmgenet-clear-id_mode_dis-in-ext_rgmii_oob_ctrl.patch +slip-not-call-free_netdev-before-rtnl_unlock-in-slip.patch +net-phy-mscc-fix-firmware-paths.patch +hinic-fix-a-irq-affinity-bug.patch +hinic-fix-a-bug-of-setting-hw_ioctxt.patch +hinic-fix-a-bug-of-rss-configuration.patch +net-rmnet-fix-null-pointer-dereference-in-rmnet_newl.patch +net-rmnet-fix-null-pointer-dereference-in-rmnet_chan.patch +net-rmnet-fix-suspicious-rcu-usage.patch +net-rmnet-remove-rcu_read_lock-in-rmnet_force_unasso.patch +net-rmnet-do-not-allow-to-change-mux-id-if-mux-id-is.patch +net-rmnet-use-upper-lower-device-infrastructure.patch +net-rmnet-fix-bridge-mode-bugs.patch +net-rmnet-fix-packet-forwarding-in-rmnet-bridge-mode.patch +sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch +mlxsw-pci-wait-longer-before-accessing-the-device-af.patch +net-dsa-mv88e6xxx-fix-masking-of-egress-port.patch +jbd2-fix-data-races-at-struct-journal_head.patch diff --git a/queue-5.5/sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch b/queue-5.5/sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch new file mode 100644 index 00000000000..3c9d1100b65 --- /dev/null +++ b/queue-5.5/sfc-fix-timestamp-reconstruction-at-16-bit-rollover-.patch @@ -0,0 +1,81 @@ +From f0f037a3ac86c8508e83670cb3f78de5b00c1178 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Feb 2020 17:33:19 +0000 +Subject: sfc: fix timestamp reconstruction at 16-bit rollover points + +From: Alex Maftei (amaftei) + +[ Upstream commit 23797b98909f34b75fd130369bde86f760db69d0 ] + +We can't just use the top bits of the last sync event as they could be +off-by-one every 65,536 seconds, giving an error in reconstruction of +65,536 seconds. + +This patch uses the difference in the bottom 16 bits (mod 2^16) to +calculate an offset that needs to be applied to the last sync event to +get to the current time. + +Signed-off-by: Alexandru-Mihai Maftei +Acked-by: Martin Habets +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/sfc/ptp.c | 38 +++++++++++++++++++++++++++++++--- + 1 file changed, 35 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c +index af15a737c6756..59b4f16896a81 100644 +--- a/drivers/net/ethernet/sfc/ptp.c ++++ b/drivers/net/ethernet/sfc/ptp.c +@@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, + u32 nic_major, u32 nic_minor, + s32 correction) + { ++ u32 sync_timestamp; + ktime_t kt = { 0 }; ++ s16 delta; + + if (!(nic_major & 0x80000000)) { + WARN_ON_ONCE(nic_major >> 16); +- /* Use the top bits from the latest sync event. */ +- nic_major &= 0xffff; +- nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000); ++ ++ /* Medford provides 48 bits of timestamp, so we must get the top ++ * 16 bits from the timesync event state. ++ * ++ * We only have the lower 16 bits of the time now, but we do ++ * have a full resolution timestamp at some point in past. As ++ * long as the difference between the (real) now and the sync ++ * is less than 2^15, then we can reconstruct the difference ++ * between those two numbers using only the lower 16 bits of ++ * each. ++ * ++ * Put another way ++ * ++ * a - b = ((a mod k) - b) mod k ++ * ++ * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know ++ * (a mod k) and b, so can calculate the delta, a - b. 
++ * ++ */ ++ sync_timestamp = last_sync_timestamp_major(efx); ++ ++ /* Because delta is s16 this does an implicit mask down to ++ * 16 bits which is what we need, assuming ++ * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that ++ * we can deal with the (unlikely) case of sync timestamps ++ * arriving from the future. ++ */ ++ delta = nic_major - sync_timestamp; ++ ++ /* Recover the fully specified time now, by applying the offset ++ * to the (fully specified) sync time. ++ */ ++ nic_major = sync_timestamp + delta; + + kt = ptp->nic_to_kernel_time(nic_major, nic_minor, + correction); +-- +2.20.1 + diff --git a/queue-5.5/signal-avoid-double-atomic-counter-increments-for-us.patch b/queue-5.5/signal-avoid-double-atomic-counter-increments-for-us.patch new file mode 100644 index 00000000000..9d66690bc8c --- /dev/null +++ b/queue-5.5/signal-avoid-double-atomic-counter-increments-for-us.patch @@ -0,0 +1,125 @@ +From e033d215e0c6457b3ee67e1b3385baae887f1224 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 24 Feb 2020 12:47:14 -0800 +Subject: signal: avoid double atomic counter increments for user accounting + +From: Linus Torvalds + +[ Upstream commit fda31c50292a5062332fa0343c084bd9f46604d9 ] + +When queueing a signal, we increment both the users count of pending +signals (for RLIMIT_SIGPENDING tracking) and we increment the refcount +of the user struct itself (because we keep a reference to the user in +the signal structure in order to correctly account for it when freeing). + +That turns out to be fairly expensive, because both of them are atomic +updates, and particularly under extreme signal handling pressure on big +machines, you can get a lot of cache contention on the user struct. +That can then cause horrid cacheline ping-pong when you do these +multiple accesses. + +So change the reference counting to only pin the user for the _first_ +pending signal, and to unpin it when the last pending signal is +dequeued. That means that when a user sees a lot of concurrent signal +queuing - which is the only situation when this matters - the only +atomic access needed is generally the 'sigpending' count update. + +This was noticed because of a particularly odd timing artifact on a +dual-socket 96C/192T Cascade Lake platform: when you get into bad +contention, on that machine for some reason seems to be much worse when +the contention happens in the upper 32-byte half of the cacheline. + +As a result, the kernel test robot will-it-scale 'signal1' benchmark had +an odd performance regression simply due to random alignment of the +'struct user_struct' (and pointed to a completely unrelated and +apparently nonsensical commit for the regression). + +Avoiding the double increments (and decrements on the dequeueing side, +of course) makes for much less contention and hugely improved +performance on that will-it-scale microbenchmark. + +Quoting Feng Tang: + + "It makes a big difference, that the performance score is tripled! bump + from original 17000 to 54000. Also the gap between 5.0-rc6 and + 5.0-rc6+Jiri's patch is reduced to around 2%" + +[ The "2% gap" is the odd cacheline placement difference on that + platform: under the extreme contention case, the effect of which half + of the cacheline was hot was 5%, so with the reduced contention the + odd timing artifact is reduced too ] + +It does help in the non-contended case too, but is not nearly as +noticeable. + +Reported-and-tested-by: Feng Tang +Cc: Eric W. 
Biederman +Cc: Huang, Ying +Cc: Philip Li +Cc: Andi Kleen +Cc: Jiri Olsa +Cc: Peter Zijlstra +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + kernel/signal.c | 23 ++++++++++++++--------- + 1 file changed, 14 insertions(+), 9 deletions(-) + +diff --git a/kernel/signal.c b/kernel/signal.c +index bcd46f547db39..eea748174ade9 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -413,27 +413,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi + { + struct sigqueue *q = NULL; + struct user_struct *user; ++ int sigpending; + + /* + * Protect access to @t credentials. This can go away when all + * callers hold rcu read lock. ++ * ++ * NOTE! A pending signal will hold on to the user refcount, ++ * and we get/put the refcount only when the sigpending count ++ * changes from/to zero. + */ + rcu_read_lock(); +- user = get_uid(__task_cred(t)->user); +- atomic_inc(&user->sigpending); ++ user = __task_cred(t)->user; ++ sigpending = atomic_inc_return(&user->sigpending); ++ if (sigpending == 1) ++ get_uid(user); + rcu_read_unlock(); + +- if (override_rlimit || +- atomic_read(&user->sigpending) <= +- task_rlimit(t, RLIMIT_SIGPENDING)) { ++ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { + q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } + + if (unlikely(q == NULL)) { +- atomic_dec(&user->sigpending); +- free_uid(user); ++ if (atomic_dec_and_test(&user->sigpending)) ++ free_uid(user); + } else { + INIT_LIST_HEAD(&q->list); + q->flags = 0; +@@ -447,8 +452,8 @@ static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) + return; +- atomic_dec(&q->user->sigpending); +- free_uid(q->user); ++ if (atomic_dec_and_test(&q->user->sigpending)) ++ free_uid(q->user); + kmem_cache_free(sigqueue_cachep, q); + } + +-- +2.20.1 + diff --git a/queue-5.5/slip-not-call-free_netdev-before-rtnl_unlock-in-slip.patch b/queue-5.5/slip-not-call-free_netdev-before-rtnl_unlock-in-slip.patch new file mode 100644 index 00000000000..2e6101217b5 --- /dev/null +++ b/queue-5.5/slip-not-call-free_netdev-before-rtnl_unlock-in-slip.patch @@ -0,0 +1,38 @@ +From 77a4cce38c31b40112e951cb3821043f1c61b9ba Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 26 Feb 2020 11:54:35 +0800 +Subject: slip: not call free_netdev before rtnl_unlock in slip_open + +From: yangerkun + +[ Upstream commit f596c87005f7b1baeb7d62d9a9e25d68c3dfae10 ] + +As the description before netdev_run_todo, we cannot call free_netdev +before rtnl_unlock, fix it by reorder the code. + +Signed-off-by: yangerkun +Reviewed-by: Oliver Hartkopp +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/slip/slip.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c +index 61d7e0d1d77db..8e56a41dd7585 100644 +--- a/drivers/net/slip/slip.c ++++ b/drivers/net/slip/slip.c +@@ -863,7 +863,10 @@ static int slip_open(struct tty_struct *tty) + tty->disc_data = NULL; + clear_bit(SLF_INUSE, &sl->flags); + sl_free_netdev(sl->dev); ++ /* do not call free_netdev before rtnl_unlock */ ++ rtnl_unlock(); + free_netdev(sl->dev); ++ return err; + + err_exit: + rtnl_unlock(); +-- +2.20.1 + diff --git a/queue-5.5/tracing-fix-number-printing-bug-in-print_synth_event.patch b/queue-5.5/tracing-fix-number-printing-bug-in-print_synth_event.patch new file mode 100644 index 00000000000..cdaf5dd2b82 --- /dev/null +++ b/queue-5.5/tracing-fix-number-printing-bug-in-print_synth_event.patch @@ -0,0 +1,91 @@ +From ab5832a762f2b5fecdf1803320d9392cfb143cb6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 14 Feb 2020 16:56:41 -0600 +Subject: tracing: Fix number printing bug in print_synth_event() + +From: Tom Zanussi + +[ Upstream commit 784bd0847eda032ed2f3522f87250655a18c0190 ] + +Fix a varargs-related bug in print_synth_event() which resulted in +strange output and oopses on 32-bit x86 systems. The problem is that +trace_seq_printf() expects the varargs to match the format string, but +print_synth_event() was always passing u64 values regardless. This +results in unspecified behavior when unpacking with va_arg() in +trace_seq_printf(). + +Add a function that takes the size into account when calling +trace_seq_printf(). + +Before: + + modprobe-1731 [003] .... 919.039758: gen_synth_test: next_pid_field=777(null)next_comm_field=hula hoops ts_ns=1000000 ts_ms=1000 cpu=3(null)my_string_field=thneed my_int_field=598(null) + +After: + + insmod-1136 [001] .... 36.634590: gen_synth_test: next_pid_field=777 next_comm_field=hula hoops ts_ns=1000000 ts_ms=1000 cpu=1 my_string_field=thneed my_int_field=598 + +Link: http://lkml.kernel.org/r/a9b59eb515dbbd7d4abe53b347dccf7a8e285657.1581720155.git.zanussi@kernel.org + +Reported-by: Steven Rostedt (VMware) +Signed-off-by: Tom Zanussi +Signed-off-by: Steven Rostedt (VMware) +Signed-off-by: Sasha Levin +--- + kernel/trace/trace_events_hist.c | 32 +++++++++++++++++++++++++++++--- + 1 file changed, 29 insertions(+), 3 deletions(-) + +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index e10585ef00e15..862fb6d16edb8 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -811,6 +811,29 @@ static const char *synth_field_fmt(char *type) + return fmt; + } + ++static void print_synth_event_num_val(struct trace_seq *s, ++ char *print_fmt, char *name, ++ int size, u64 val, char *space) ++{ ++ switch (size) { ++ case 1: ++ trace_seq_printf(s, print_fmt, name, (u8)val, space); ++ break; ++ ++ case 2: ++ trace_seq_printf(s, print_fmt, name, (u16)val, space); ++ break; ++ ++ case 4: ++ trace_seq_printf(s, print_fmt, name, (u32)val, space); ++ break; ++ ++ default: ++ trace_seq_printf(s, print_fmt, name, val, space); ++ break; ++ } ++} ++ + static enum print_line_t print_synth_event(struct trace_iterator *iter, + int flags, + struct trace_event *event) +@@ -849,10 +872,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, + } else { + struct trace_print_flags __flags[] = { + __def_gfpflag_names, {-1, NULL} }; ++ char *space = (i == se->n_fields - 1 ? 
"" : " "); + +- trace_seq_printf(s, print_fmt, se->fields[i]->name, +- entry->fields[n_u64], +- i == se->n_fields - 1 ? "" : " "); ++ print_synth_event_num_val(s, print_fmt, ++ se->fields[i]->name, ++ se->fields[i]->size, ++ entry->fields[n_u64], ++ space); + + if (strcmp(se->fields[i]->type, "gfp_t") == 0) { + trace_seq_puts(s, " ("); +-- +2.20.1 + -- 2.47.3