From: Greg Kroah-Hartman
Date: Wed, 8 Sep 2021 14:35:39 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v5.4.145~22
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=925fac2594fb958fe2d2db0186119014dcdd484a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	revert-r8169-avoid-link-up-interrupt-issue-on-rtl8106e-if-user-enables-aspm.patch
	x86-events-amd-iommu-fix-invalid-perf-result-due-to-iommu-pmc-power-gating.patch
---

diff --git a/queue-5.4/revert-r8169-avoid-link-up-interrupt-issue-on-rtl8106e-if-user-enables-aspm.patch b/queue-5.4/revert-r8169-avoid-link-up-interrupt-issue-on-rtl8106e-if-user-enables-aspm.patch
new file mode 100644
index 00000000000..cb697358f9f
--- /dev/null
+++ b/queue-5.4/revert-r8169-avoid-link-up-interrupt-issue-on-rtl8106e-if-user-enables-aspm.patch
@@ -0,0 +1,30 @@
+From 2115d3d482656ea702f7cf308c0ded3500282903 Mon Sep 17 00:00:00 2001
+From: Hayes Wang
+Date: Fri, 6 Aug 2021 17:15:55 +0800
+Subject: Revert "r8169: avoid link-up interrupt issue on RTL8106e if user enables ASPM"
+
+From: Hayes Wang
+
+commit 2115d3d482656ea702f7cf308c0ded3500282903 upstream.
+
+This reverts commit 1ee8856de82faec9bc8bd0f2308a7f27e30ba207.
+
+This is used to re-enable ASPM on RTL8106e, if it is possible.
+
+Signed-off-by: Hayes Wang
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4713,6 +4713,7 @@ static void rtl_hw_start_8168g(struct rt
+ 	rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ 
+ 	rtl_pcie_state_l2l3_disable(tp);
++	rtl_hw_aspm_clkreq_enable(tp, true);
+ }
+ 
+ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
diff --git a/queue-5.4/series b/queue-5.4/series
index d504f831572..63f1c4d6b54 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -25,3 +25,5 @@ ipv4-icmp-l3mdev-perform-icmp-error-route-lookup-on-source-device-routing-table-
 powerpc-boot-delete-unneeded-.globl-_zimage_start.patch
 net-ll_temac-remove-left-over-debug-message.patch
 mm-page_alloc-speed-up-the-iteration-of-max_order.patch
+revert-r8169-avoid-link-up-interrupt-issue-on-rtl8106e-if-user-enables-aspm.patch
+x86-events-amd-iommu-fix-invalid-perf-result-due-to-iommu-pmc-power-gating.patch
diff --git a/queue-5.4/x86-events-amd-iommu-fix-invalid-perf-result-due-to-iommu-pmc-power-gating.patch b/queue-5.4/x86-events-amd-iommu-fix-invalid-perf-result-due-to-iommu-pmc-power-gating.patch
new file mode 100644
index 00000000000..056e9c71f1f
--- /dev/null
+++ b/queue-5.4/x86-events-amd-iommu-fix-invalid-perf-result-due-to-iommu-pmc-power-gating.patch
@@ -0,0 +1,135 @@
+From e10de314287c2c14b0e6f0e3e961975ce2f4a83d Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit
+Date: Tue, 4 May 2021 01:52:36 -0500
+Subject: x86/events/amd/iommu: Fix invalid Perf result due to IOMMU PMC power-gating
+
+From: Suravee Suthikulpanit
+
+commit e10de314287c2c14b0e6f0e3e961975ce2f4a83d upstream.
+
+On certain AMD platforms, when the IOMMU performance counter source
+(csource) field is zero, power-gating for the counter is enabled, which
+prevents write access and returns zero for read access.
+
+This can cause invalid perf results, especially when event multiplexing
+is needed (i.e. more events than available counters), since
+the current logic keeps track of the previously read counter value,
+and subsequently re-programs the counter to continue counting the event.
+With power-gating enabled, we cannot guarantee successful re-programming
+of the counter.
+
+Work around this issue by:
+
+1. Modifying the ordering of setting/reading counters and enabling/
+   disabling csources, so that the counter is only accessed while the
+   csource is set to non-zero.
+
+2. Since the AMD IOMMU PMU does not support interrupt mode, the logic
+   can be simplified to always start counting from zero and to
+   accumulate the counter value when stopping, without the need to
+   keep track of, and reprogram the counter with, the previously read
+   counter value.
+
+This has been tested on systems with and without power-gating.
+
+Fixes: 994d6608efe4 ("iommu/amd: Remove performance counter pre-initialization test")
+Suggested-by: Alexander Monakov
+Signed-off-by: Suravee Suthikulpanit
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20210504065236.4415-1-suravee.suthikulpanit@amd.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/events/amd/iommu.c |   47 ++++++++++++++++++++++++--------------------
+ 1 file changed, 26 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -18,8 +18,6 @@
+ #include "../perf_event.h"
+ #include "iommu.h"
+ 
+-#define COUNTER_SHIFT	16
+-
+ /* iommu pmu conf masks */
+ #define GET_CSOURCE(x)	((x)->conf & 0xFFULL)
+ #define GET_DEVID(x)	(((x)->conf >> 8) & 0xFFFFULL)
+@@ -285,22 +283,31 @@ static void perf_iommu_start(struct perf
+ 	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ 	hwc->state = 0;
+ 
++	/*
++	 * To account for power-gating, which prevents writes to
++	 * the counter, we need to enable the counter
++	 * before setting up the counter register.
++	 */
++	perf_iommu_enable_event(event);
++
+ 	if (flags & PERF_EF_RELOAD) {
+-		u64 prev_raw_count = local64_read(&hwc->prev_count);
++		u64 count = 0;
+ 		struct amd_iommu *iommu = perf_event_2_iommu(event);
+ 
++		/*
++		 * Since the IOMMU PMU only supports counting mode,
++		 * the counter always starts with value zero.
++		 */
+ 		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
+-				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
++				     IOMMU_PC_COUNTER_REG, &count);
+ 	}
+ 
+-	perf_iommu_enable_event(event);
+ 	perf_event_update_userpage(event);
+-
+ }
+ 
+ static void perf_iommu_read(struct perf_event *event)
+ {
+-	u64 count, prev, delta;
++	u64 count;
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	struct amd_iommu *iommu = perf_event_2_iommu(event);
+ 
+@@ -311,14 +318,11 @@ static void perf_iommu_read(struct perf_
+ 	/* IOMMU pc counter register is only 48 bits */
+ 	count &= GENMASK_ULL(47, 0);
+ 
+-	prev = local64_read(&hwc->prev_count);
+-	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
+-		return;
+-
+-	/* Handle 48-bit counter overflow */
+-	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
+-	delta >>= COUNTER_SHIFT;
+-	local64_add(delta, &event->count);
++	/*
++	 * Since the counter always starts with value zero,
++	 * simply accumulate the count for the event.
++	 */
++	local64_add(count, &event->count);
+ }
+ 
+ static void perf_iommu_stop(struct perf_event *event, int flags)
+@@ -328,15 +332,16 @@ static void perf_iommu_stop(struct perf_
+ 	if (hwc->state & PERF_HES_UPTODATE)
+ 		return;
+ 
++	/*
++	 * To account for power-gating, in which reading the counter would
++	 * return zero, we need to read the register before disabling.
++	 */
++	perf_iommu_read(event);
++	hwc->state |= PERF_HES_UPTODATE;
++
+ 	perf_iommu_disable_event(event);
+ 	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ 	hwc->state |= PERF_HES_STOPPED;
+-
+-	if (hwc->state & PERF_HES_UPTODATE)
+-		return;
+-
+-	perf_iommu_read(event);
+-	hwc->state |= PERF_HES_UPTODATE;
+ }
+ 
+ static int perf_iommu_add(struct perf_event *event, int flags)
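
The counting model that the x86/events/amd/iommu patch switches to can be
illustrated outside the kernel: ungate the counter first (non-zero csource),
program it to zero, and on stop read it while it is still ungated and
accumulate the raw 48-bit value, instead of tracking a previous count and
computing deltas. The stand-alone C program below is a minimal sketch of that
model only; struct fake_pmc, pmc_read(), event_start() and the other names
are hypothetical stand-ins, not the real perf or AMD IOMMU interfaces.

#include <stdint.h>
#include <stdio.h>

#define PMC_MASK ((1ULL << 48) - 1)	/* IOMMU PMCs are only 48 bits wide */

struct fake_pmc {
	uint64_t reg;			/* simulated counter register */
	int gated;			/* 1 while csource == 0 (power-gated) */
};

struct fake_event {
	uint64_t count;			/* accumulated total, like event->count */
};

/* A non-zero csource ungates the counter; zero gates it again. */
static void pmc_enable(struct fake_pmc *pmc)  { pmc->gated = 0; }
static void pmc_disable(struct fake_pmc *pmc) { pmc->gated = 1; }

/* While gated, writes are dropped and reads return zero. */
static void pmc_write(struct fake_pmc *pmc, uint64_t val)
{
	if (!pmc->gated)
		pmc->reg = val & PMC_MASK;
}

static uint64_t pmc_read(struct fake_pmc *pmc)
{
	return pmc->gated ? 0 : (pmc->reg & PMC_MASK);
}

/* start: ungate first, then program the counter to zero. */
static void event_start(struct fake_pmc *pmc)
{
	pmc_enable(pmc);
	pmc_write(pmc, 0);
}

/* stop: read while still ungated, accumulate, then gate. */
static void event_stop(struct fake_event *ev, struct fake_pmc *pmc)
{
	ev->count += pmc_read(pmc);
	pmc_disable(pmc);
}

int main(void)
{
	struct fake_pmc pmc = { .reg = 0, .gated = 1 };
	struct fake_event ev = { .count = 0 };

	/* Two scheduling rounds, as under event multiplexing. */
	event_start(&pmc);
	pmc.reg += 100;			/* hardware ticks while running */
	event_stop(&ev, &pmc);

	event_start(&pmc);
	pmc.reg += 250;			/* more ticks in the next round */
	event_stop(&ev, &pmc);

	printf("total = %llu\n", (unsigned long long)ev.count);	/* 350 */
	return 0;
}

Note the ordering: the counter is written only after it is ungated, and read
back before it is gated again, mirroring the reordering the patch makes in
perf_iommu_start() and perf_iommu_stop() above.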