--- /dev/null
+From 9c9c6d0ab08acfe41c9f7efa72c4ad3f133a266b Mon Sep 17 00:00:00 2001
+From: Matt Roper <matthew.d.roper@intel.com>
+Date: Wed, 28 Jul 2021 16:34:11 -0700
+Subject: drm/i915: Correct SFC_DONE register offset
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+commit 9c9c6d0ab08acfe41c9f7efa72c4ad3f133a266b upstream.
+
+The register offset for SFC_DONE was missing a '0' at the end, causing
+us to read from a non-existent register address. We only use this
+register in error state dumps so the mistake hasn't caused any real
+problems, but fixing it will hopefully make the error state dumps a bit
+more useful for debugging.
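+
+As an illustration only (not part of the change), the corrected macro
+now expands to one register per SFC instance at a 0x1000 stride:
+
+  /* GEN12_SFC_DONE(n) = _MMIO(0x1cc000 + (n) * 0x1000)          */
+  /* n = 0 -> 0x1cc000   n = 1 -> 0x1cd000                       */
+  /* n = 2 -> 0x1ce000   n = 3 -> 0x1cf000                       */
+  /* the old 0x1cc00 + (n) * 0x100 landed in a nonexistent range */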
+
+Fixes: e50dbdbfd9fb ("drm/i915/tgl: Add SFC instdone to error state")
+Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210728233411.2365788-1-matthew.d.roper@intel.com
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+(cherry picked from commit 82929a2140eb99f1f1d21855f3f580e70d7abdd8)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -413,7 +413,7 @@ static inline bool i915_mmio_reg_valid(i
+ #define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
+ #define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+
+-#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
++#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+ #define GEN12_SFC_DONE_MAX 4
+
+ #define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
--- /dev/null
+From 73606ba9242f8e32023699b500b7922b4cf2993c Mon Sep 17 00:00:00 2001
+From: Mike Tipton <mdtipton@codeaurora.org>
+Date: Wed, 21 Jul 2021 10:54:30 -0700
+Subject: interconnect: Always call pre_aggregate before aggregate
+
+From: Mike Tipton <mdtipton@codeaurora.org>
+
+commit 73606ba9242f8e32023699b500b7922b4cf2993c upstream.
+
+The pre_aggregate callback isn't called in all cases before calling
+aggregate. Add the missing calls so providers can rely on consistent
+framework behavior.
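+
+For reference, the provider callback ordering the framework is expected
+to follow after this change, sketched and simplified (not verbatim
+core.c code):
+
+  if (provider->pre_aggregate)
+          provider->pre_aggregate(node);  /* reset per-node accumulators */
+  if (provider->aggregate)
+          provider->aggregate(node, tag, avg_bw, peak_bw,
+                              &node->avg_bw, &node->peak_bw);
+  provider->set(node, node);              /* commit aggregated values */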
+
+Fixes: d3703b3e255f ("interconnect: Aggregate before setting initial bandwidth")
+Signed-off-by: Mike Tipton <mdtipton@codeaurora.org>
+Link: https://lore.kernel.org/r/20210721175432.2119-3-mdtipton@codeaurora.org
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/interconnect/core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -973,9 +973,14 @@ void icc_node_add(struct icc_node *node,
+ }
+ node->avg_bw = node->init_avg;
+ node->peak_bw = node->init_peak;
++
++ if (provider->pre_aggregate)
++ provider->pre_aggregate(node);
++
+ if (provider->aggregate)
+ provider->aggregate(node, 0, node->init_avg, node->init_peak,
+ &node->avg_bw, &node->peak_bw);
++
+ provider->set(node, node);
+ node->avg_bw = 0;
+ node->peak_bw = 0;
--- /dev/null
+From 85b1ebfea2b0d8797266bcc6f04b6cc87e38290a Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Fri, 30 Jul 2021 08:54:08 +0100
+Subject: interconnect: Fix undersized devress_alloc allocation
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 85b1ebfea2b0d8797266bcc6f04b6cc87e38290a upstream.
+
+The expression sizeof(**ptr) sizes the allocation by the object a path
+pointer points to, but the devres only needs to hold a single struct
+icc_path pointer. Fix this by using sizeof(*ptr).
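+
+A minimal illustration of the difference, for clarity only:
+
+  struct icc_path **ptr;
+  /* sizeof(*ptr)  == sizeof(struct icc_path *) -- what the devres must hold */
+  /* sizeof(**ptr) == sizeof(struct icc_path)   -- the pointed-to object     */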
+
+Addresses-Coverity: ("Wrong sizeof argument")
+Fixes: e145d9a184f2 ("interconnect: Add devm_of_icc_get() as exported API for users")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Link: https://lore.kernel.org/r/20210730075408.19945-1-colin.king@canonical.com
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/interconnect/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -403,7 +403,7 @@ struct icc_path *devm_of_icc_get(struct
+ {
+ struct icc_path **ptr, *path;
+
+- ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
++ ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
--- /dev/null
+From f84f5b6f72e68bbaeb850b58ac167e4a3a47532a Mon Sep 17 00:00:00 2001
+From: Mike Tipton <mdtipton@codeaurora.org>
+Date: Wed, 21 Jul 2021 10:54:32 -0700
+Subject: interconnect: qcom: icc-rpmh: Add BCMs to commit list in pre_aggregate
+
+From: Mike Tipton <mdtipton@codeaurora.org>
+
+commit f84f5b6f72e68bbaeb850b58ac167e4a3a47532a upstream.
+
+We're only adding BCMs to the commit list in aggregate(), but there are
+cases where pre_aggregate() is called without subsequently calling
+aggregate(). In particular, in icc_sync_state() when a node with initial
+BW has zero requests. Since BCMs aren't added to the commit list in
+these cases, we don't actually send the zero BW request to HW. So the
+resources remain on unnecessarily.
+
+Add BCMs to the commit list in pre_aggregate() instead, which is always
+called even when there are no requests.
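+
+A simplified sketch of the zero-request sync-state path in question
+(framework side, approximate):
+
+  /* icc_sync_state() -> aggregate_requests(node) -> p->set(node, node) */
+  if (p->pre_aggregate)
+          p->pre_aggregate(node);         /* always runs; now queues the BCMs */
+  hlist_for_each_entry(r, &node->req_list, req_node)
+          p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
+                       &node->avg_bw, &node->peak_bw);  /* skipped: no requests */
+  p->set(node, node);                     /* commits the queued zero-BW votes */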
+
+Fixes: 976daac4a1c5 ("interconnect: qcom: Consolidate interconnect RPMh support")
+Signed-off-by: Mike Tipton <mdtipton@codeaurora.org>
+Link: https://lore.kernel.org/r/20210721175432.2119-5-mdtipton@codeaurora.org
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/interconnect/qcom/icc-rpmh.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/interconnect/qcom/icc-rpmh.c
++++ b/drivers/interconnect/qcom/icc-rpmh.c
+@@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_n
+ {
+ size_t i;
+ struct qcom_icc_node *qn;
++ struct qcom_icc_provider *qp;
+
+ qn = node->data;
++ qp = to_qcom_provider(node->provider);
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
++
++ for (i = 0; i < qn->num_bcms; i++)
++ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+ }
+ EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
+
+@@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *
+ {
+ size_t i;
+ struct qcom_icc_node *qn;
+- struct qcom_icc_provider *qp;
+
+ qn = node->data;
+- qp = to_qcom_provider(node->provider);
+
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+@@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+- for (i = 0; i < qn->num_bcms; i++)
+- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
--- /dev/null
+From ce5a595744126be4f1327e29e3c5ae9aac6b38d5 Mon Sep 17 00:00:00 2001
+From: Mike Tipton <mdtipton@codeaurora.org>
+Date: Wed, 21 Jul 2021 10:54:31 -0700
+Subject: interconnect: qcom: icc-rpmh: Ensure floor BW is enforced for all nodes
+
+From: Mike Tipton <mdtipton@codeaurora.org>
+
+commit ce5a595744126be4f1327e29e3c5ae9aac6b38d5 upstream.
+
+We currently only enforce BW floors for a subset of nodes in a path.
+All BCMs that need updating are queued in the pre_aggregate/aggregate
+phase. The first set() commits all queued BCMs and subsequent set()
+calls short-circuit without committing anything. Since the floor BW
+isn't set in sum_avg/max_peak until set(), some BCMs are committed
+before their associated nodes reflect the floor.
+
+Set the floor as each node is being aggregated. This ensures that all
+relevant floors are set before the BCMs are committed.
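+
+Sketch of the intended ordering (not literal code): while each node is
+aggregated, and before any BCM is committed,
+
+  qn->sum_avg[i]  = max_t(u64, qn->sum_avg[i],  node->init_avg);
+  qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
+
+whereas previously the floor was only folded in by qcom_icc_set() for
+the node being set, so BCMs belonging to the other nodes in the path
+could be committed without it.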
+
+Fixes: 266cd33b5913 ("interconnect: qcom: Ensure that the floor bandwidth value is enforced")
+Signed-off-by: Mike Tipton <mdtipton@codeaurora.org>
+Link: https://lore.kernel.org/r/20210721175432.2119-4-mdtipton@codeaurora.org
+[georgi: Removed unused variable]
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/interconnect/qcom/icc-rpmh.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/interconnect/qcom/icc-rpmh.c
++++ b/drivers/interconnect/qcom/icc-rpmh.c
+@@ -57,6 +57,11 @@ int qcom_icc_aggregate(struct icc_node *
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
++
++ if (node->init_avg || node->init_peak) {
++ qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
++ qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
++ }
+ }
+
+ *agg_avg += avg_bw;
+@@ -79,7 +84,6 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
+ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+ {
+ struct qcom_icc_provider *qp;
+- struct qcom_icc_node *qn;
+ struct icc_node *node;
+
+ if (!src)
+@@ -88,12 +92,6 @@ int qcom_icc_set(struct icc_node *src, s
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+- qn = node->data;
+-
+- qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
+- node->avg_bw);
+- qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
+- node->peak_bw);
+
+ qcom_icc_bcm_voter_commit(qp->voter);
+
--- /dev/null
+From 456a9dace42ecfcec7ce6e17c18d1985d628dcd0 Mon Sep 17 00:00:00 2001
+From: Mike Tipton <mdtipton@codeaurora.org>
+Date: Wed, 21 Jul 2021 10:54:29 -0700
+Subject: interconnect: Zero initial BW after sync-state
+
+From: Mike Tipton <mdtipton@codeaurora.org>
+
+commit 456a9dace42ecfcec7ce6e17c18d1985d628dcd0 upstream.
+
+The initial BW values may be used by providers to enforce floors. Zero
+these values after sync-state so that providers know when to stop
+enforcing them.
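+
+A provider-side check that this enables might look like the following
+sketch (the icc-rpmh patches later in this series use this pattern):
+
+  if (node->init_avg || node->init_peak) {
+          /* sync-state not reached yet: keep enforcing the initial floor */
+  }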
+
+Fixes: b1d681d8d324 ("interconnect: Add sync state support")
+Signed-off-by: Mike Tipton <mdtipton@codeaurora.org>
+Link: https://lore.kernel.org/r/20210721175432.2119-2-mdtipton@codeaurora.org
+Signed-off-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/interconnect/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -1106,6 +1106,8 @@ void icc_sync_state(struct device *dev)
+ dev_dbg(p->dev, "interconnect provider is in synced state\n");
+ list_for_each_entry(n, &p->nodes, node_list) {
+ if (n->init_avg || n->init_peak) {
++ n->init_avg = 0;
++ n->init_peak = 0;
+ aggregate_requests(n);
+ p->set(n, n);
+ }
--- /dev/null
+From df51fe7ea1c1c2c3bfdb81279712fdd2e4ea6c27 Mon Sep 17 00:00:00 2001
+From: Like Xu <likexu@tencent.com>
+Date: Mon, 2 Aug 2021 15:08:50 +0800
+Subject: perf/x86/amd: Don't touch the AMD64_EVENTSEL_HOSTONLY bit inside the guest
+
+From: Like Xu <likexu@tencent.com>
+
+commit df51fe7ea1c1c2c3bfdb81279712fdd2e4ea6c27 upstream.
+
+If we use "perf record" in an AMD Milan guest, dmesg reports a #GP
+warning from an unchecked MSR access error on MSR_F15H_PERF_CTLx:
+
+ [] unchecked MSR access error: WRMSR to 0xc0010200 (tried to write 0x0000020000110076) at rIP: 0xffffffff8106ddb4 (native_write_msr+0x4/0x20)
+ [] Call Trace:
+ [] amd_pmu_disable_event+0x22/0x90
+ [] x86_pmu_stop+0x4c/0xa0
+ [] x86_pmu_del+0x3a/0x140
+
+The AMD64_EVENTSEL_HOSTONLY bit is defined and used on the host,
+while the guest perf driver should avoid such use.
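+
+For context, the event-enable path already strips such bits before the
+WRMSR; the fix mirrors that on the disable path (sketch, approximate):
+
+  u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
+  /* enable path (existing behaviour):                              */
+  wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
+  /* disable path after this fix:                                   */
+  wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+  /* in a guest the mask is expected to still contain               */
+  /* AMD64_EVENTSEL_HOSTONLY, so the bit is never written           */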
+
+Fixes: 1018faa6cf23 ("perf/x86/kvm: Fix Host-Only/Guest-Only counting with SVM disabled")
+Signed-off-by: Like Xu <likexu@tencent.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
+Tested-by: Kim Phillips <kim.phillips@amd.com>
+Tested-by: Liam Merwick <liam.merwick@oracle.com>
+Link: https://lkml.kernel.org/r/20210802070850.35295-1-likexu@tencent.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/perf_event.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -1009,9 +1009,10 @@ void x86_pmu_stop(struct perf_event *eve
+
+ static inline void x86_pmu_disable_event(struct perf_event *event)
+ {
++ u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+ struct hw_perf_event *hwc = &event->hw;
+
+- wrmsrl(hwc->config_base, hwc->config);
++ wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+
+ if (is_counter_pair(hwc))
+ wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
--- /dev/null
+From f558c2b834ec27e75d37b1c860c139e7b7c3a8e4 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 3 Aug 2021 12:45:01 +0200
+Subject: sched/rt: Fix double enqueue caused by rt_effective_prio
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit f558c2b834ec27e75d37b1c860c139e7b7c3a8e4 upstream.
+
+Double enqueues in rt runqueues (list) have been reported while running
+a simple test that spawns a number of threads doing a short sleep/run
+pattern while being concurrently setscheduled between rt and fair class.
+
+ WARNING: CPU: 3 PID: 2825 at kernel/sched/rt.c:1294 enqueue_task_rt+0x355/0x360
+ CPU: 3 PID: 2825 Comm: setsched__13
+ RIP: 0010:enqueue_task_rt+0x355/0x360
+ Call Trace:
+ __sched_setscheduler+0x581/0x9d0
+ _sched_setscheduler+0x63/0xa0
+ do_sched_setscheduler+0xa0/0x150
+ __x64_sys_sched_setscheduler+0x1a/0x30
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+ list_add double add: new=ffff9867cb629b40, prev=ffff9867cb629b40,
+ next=ffff98679fc67ca0.
+ kernel BUG at lib/list_debug.c:31!
+ invalid opcode: 0000 [#1] PREEMPT_RT SMP PTI
+ CPU: 3 PID: 2825 Comm: setsched__13
+ RIP: 0010:__list_add_valid+0x41/0x50
+ Call Trace:
+ enqueue_task_rt+0x291/0x360
+ __sched_setscheduler+0x581/0x9d0
+ _sched_setscheduler+0x63/0xa0
+ do_sched_setscheduler+0xa0/0x150
+ __x64_sys_sched_setscheduler+0x1a/0x30
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+__sched_setscheduler() uses rt_effective_prio() to handle proper queuing
+of priority boosted tasks that are setscheduled while being boosted.
+rt_effective_prio() is however called twice per
+__sched_setscheduler() call: first directly by __sched_setscheduler()
+before dequeuing the task and then by __setscheduler() to actually do
+the priority change. If the priority of the pi_top_task is concurrently
+being changed however, it might happen that the two calls return
+different results. If, for example, the first call returned the same rt
+priority the task was running at and the second one a fair priority, the
+task won't be removed from the rt list (on_list still set) but will then
+be enqueued in the fair runqueue. When eventually setscheduled back to rt
+it will be seen as enqueued already and the WARNING/BUG will be issued.
+
+Fix this by calling rt_effective_prio() only once and then reusing the
+return value. While at it, also refactor the code for clarity. Concurrent
+priority inheritance handling is still safe and will eventually converge
+to a new state by following the inheritance chain(s).
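+
+Schematically, the flow after the fix (condensed from the patch below):
+
+  newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
+  if (pi)
+          newprio = rt_effective_prio(p, newprio);  /* evaluated exactly once */
+  ...
+  if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+          __setscheduler_params(p, attr);
+          __setscheduler_prio(p, newprio);          /* the same value is reused */
+  }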
+
+Fixes: 0782e63bc6fe ("sched: Handle priority boosted tasks proper in setscheduler()")
+[squashed Peterz changes; added changelog]
+Reported-by: Mark Simmons <msimmons@redhat.com>
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210803104501.38333-1-juri.lelli@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 90 ++++++++++++++++++++--------------------------------
+ 1 file changed, 35 insertions(+), 55 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1598,12 +1598,18 @@ void deactivate_task(struct rq *rq, stru
+ dequeue_task(rq, p, flags);
+ }
+
+-/*
+- * __normal_prio - return the priority that is based on the static prio
+- */
+-static inline int __normal_prio(struct task_struct *p)
++static inline int __normal_prio(int policy, int rt_prio, int nice)
+ {
+- return p->static_prio;
++ int prio;
++
++ if (dl_policy(policy))
++ prio = MAX_DL_PRIO - 1;
++ else if (rt_policy(policy))
++ prio = MAX_RT_PRIO - 1 - rt_prio;
++ else
++ prio = NICE_TO_PRIO(nice);
++
++ return prio;
+ }
+
+ /*
+@@ -1615,15 +1621,7 @@ static inline int __normal_prio(struct t
+ */
+ static inline int normal_prio(struct task_struct *p)
+ {
+- int prio;
+-
+- if (task_has_dl_policy(p))
+- prio = MAX_DL_PRIO-1;
+- else if (task_has_rt_policy(p))
+- prio = MAX_RT_PRIO-1 - p->rt_priority;
+- else
+- prio = __normal_prio(p);
+- return prio;
++ return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
+ }
+
+ /*
+@@ -3248,7 +3246,7 @@ int sched_fork(unsigned long clone_flags
+ } else if (PRIO_TO_NICE(p->static_prio) < 0)
+ p->static_prio = NICE_TO_PRIO(0);
+
+- p->prio = p->normal_prio = __normal_prio(p);
++ p->prio = p->normal_prio = p->static_prio;
+ set_load_weight(p, false);
+
+ /*
+@@ -4799,6 +4797,18 @@ int default_wake_function(wait_queue_ent
+ }
+ EXPORT_SYMBOL(default_wake_function);
+
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++ if (dl_prio(prio))
++ p->sched_class = &dl_sched_class;
++ else if (rt_prio(prio))
++ p->sched_class = &rt_sched_class;
++ else
++ p->sched_class = &fair_sched_class;
++
++ p->prio = prio;
++}
++
+ #ifdef CONFIG_RT_MUTEXES
+
+ static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+@@ -4914,22 +4924,19 @@ void rt_mutex_setprio(struct task_struct
+ } else {
+ p->dl.pi_se = &p->dl;
+ }
+- p->sched_class = &dl_sched_class;
+ } else if (rt_prio(prio)) {
+ if (dl_prio(oldprio))
+ p->dl.pi_se = &p->dl;
+ if (oldprio < prio)
+ queue_flag |= ENQUEUE_HEAD;
+- p->sched_class = &rt_sched_class;
+ } else {
+ if (dl_prio(oldprio))
+ p->dl.pi_se = &p->dl;
+ if (rt_prio(oldprio))
+ p->rt.timeout = 0;
+- p->sched_class = &fair_sched_class;
+ }
+
+- p->prio = prio;
++ __setscheduler_prio(p, prio);
+
+ if (queued)
+ enqueue_task(rq, p, queue_flag);
+@@ -5162,35 +5169,6 @@ static void __setscheduler_params(struct
+ set_load_weight(p, true);
+ }
+
+-/* Actually do priority change: must hold pi & rq lock. */
+-static void __setscheduler(struct rq *rq, struct task_struct *p,
+- const struct sched_attr *attr, bool keep_boost)
+-{
+- /*
+- * If params can't change scheduling class changes aren't allowed
+- * either.
+- */
+- if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
+- return;
+-
+- __setscheduler_params(p, attr);
+-
+- /*
+- * Keep a potential priority boosting if called from
+- * sched_setscheduler().
+- */
+- p->prio = normal_prio(p);
+- if (keep_boost)
+- p->prio = rt_effective_prio(p, p->prio);
+-
+- if (dl_prio(p->prio))
+- p->sched_class = &dl_sched_class;
+- else if (rt_prio(p->prio))
+- p->sched_class = &rt_sched_class;
+- else
+- p->sched_class = &fair_sched_class;
+-}
+-
+ /*
+ * Check the target process has a UID that matches the current process's:
+ */
+@@ -5211,10 +5189,8 @@ static int __sched_setscheduler(struct t
+ const struct sched_attr *attr,
+ bool user, bool pi)
+ {
+- int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
+- MAX_RT_PRIO - 1 - attr->sched_priority;
+- int retval, oldprio, oldpolicy = -1, queued, running;
+- int new_effective_prio, policy = attr->sched_policy;
++ int oldpolicy = -1, policy = attr->sched_policy;
++ int retval, oldprio, newprio, queued, running;
+ const struct sched_class *prev_class;
+ struct rq_flags rf;
+ int reset_on_fork;
+@@ -5412,6 +5388,7 @@ change:
+ p->sched_reset_on_fork = reset_on_fork;
+ oldprio = p->prio;
+
++ newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
+ if (pi) {
+ /*
+ * Take priority boosted tasks into account. If the new
+@@ -5420,8 +5397,8 @@ change:
+ * the runqueue. This will be done when the task deboost
+ * itself.
+ */
+- new_effective_prio = rt_effective_prio(p, newprio);
+- if (new_effective_prio == oldprio)
++ newprio = rt_effective_prio(p, newprio);
++ if (newprio == oldprio)
+ queue_flags &= ~DEQUEUE_MOVE;
+ }
+
+@@ -5434,7 +5411,10 @@ change:
+
+ prev_class = p->sched_class;
+
+- __setscheduler(rq, p, attr, pi);
++ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++ __setscheduler_params(p, attr);
++ __setscheduler_prio(p, newprio);
++ }
+ __setscheduler_uclamp(p, attr);
+
+ if (queued) {
kvm-do-not-leak-memory-for-duplicate-debugfs-directories.patch
kvm-x86-mmu-fix-per-cpu-counter-corruption-on-32-bit-builds.patch
arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch
+soc-ixp4xx-fix-printing-resources.patch
+interconnect-fix-undersized-devress_alloc-allocation.patch
+spi-meson-spicc-fix-memory-leak-in-meson_spicc_remove.patch
+interconnect-zero-initial-bw-after-sync-state.patch
+interconnect-always-call-pre_aggregate-before-aggregate.patch
+interconnect-qcom-icc-rpmh-ensure-floor-bw-is-enforced-for-all-nodes.patch
+interconnect-qcom-icc-rpmh-add-bcms-to-commit-list-in-pre_aggregate.patch
+drm-i915-correct-sfc_done-register-offset.patch
+soc-ixp4xx-qmgr-fix-invalid-__iomem-access.patch
+perf-x86-amd-don-t-touch-the-amd64_eventsel_hostonly-bit-inside-the-guest.patch
+sched-rt-fix-double-enqueue-caused-by-rt_effective_prio.patch
--- /dev/null
+From 8861452b2097bb0b5d0081a1c137fb3870b0a31f Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 8 Nov 2019 09:43:06 +0100
+Subject: soc: ixp4xx: fix printing resources
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 8861452b2097bb0b5d0081a1c137fb3870b0a31f upstream.
+
+When compile-testing with 64-bit resource_size_t, gcc reports an invalid
+printk format string:
+
+In file included from include/linux/dma-mapping.h:7,
+ from drivers/soc/ixp4xx/ixp4xx-npe.c:15:
+drivers/soc/ixp4xx/ixp4xx-npe.c: In function 'ixp4xx_npe_probe':
+drivers/soc/ixp4xx/ixp4xx-npe.c:694:18: error: format '%x' expects argument of type 'unsigned int', but argument 4 has type 'resource_size_t' {aka 'long long unsigned int'} [-Werror=format=]
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+
+Use the special %pR format string to print the resources.
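+
+For reference (generic printk conventions, not specific to this driver):
+%pR takes a struct resource pointer and %pa a pointer to a
+resource_size_t/phys_addr_t, so neither depends on the width of
+resource_size_t:
+
+  dev_info(dev, "NPE%d at %pR registered\n", i, res);
+  dev_info(dev, "start %pa, end %pa\n", &res->start, &res->end);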
+
+Fixes: 0b458d7b10f8 ("soc: ixp4xx: npe: Pass addresses as resources")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/ixp4xx/ixp4xx-npe.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
++++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
+@@ -690,8 +690,8 @@ static int ixp4xx_npe_probe(struct platf
+
+ if (!(ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << i))) {
+- dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+- i, res->start, res->end);
++ dev_info(dev, "NPE%d at %pR not available\n",
++ i, res);
+ continue; /* NPE already disabled or not present */
+ }
+ npe->regs = devm_ioremap_resource(dev, res);
+@@ -699,13 +699,12 @@ static int ixp4xx_npe_probe(struct platf
+ return PTR_ERR(npe->regs);
+
+ if (npe_reset(npe)) {
+- dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
+- i, res->start, res->end);
++ dev_info(dev, "NPE%d at %pR does not reset\n",
++ i, res);
+ continue;
+ }
+ npe->valid = 1;
+- dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
+- i, res->start, res->end);
++ dev_info(dev, "NPE%d at %pR registered\n", i, res);
+ found++;
+ }
+
--- /dev/null
+From a8eee86317f11e97990d755d4615c1c0db203d08 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 3 Aug 2021 10:12:34 +0200
+Subject: soc: ixp4xx/qmgr: fix invalid __iomem access
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit a8eee86317f11e97990d755d4615c1c0db203d08 upstream.
+
+Sparse reports a compile time warning when dereferencing an
+__iomem pointer:
+
+drivers/soc/ixp4xx/ixp4xx-qmgr.c:149:37: warning: dereference of noderef expression
+drivers/soc/ixp4xx/ixp4xx-qmgr.c:153:40: warning: dereference of noderef expression
+drivers/soc/ixp4xx/ixp4xx-qmgr.c:154:40: warning: dereference of noderef expression
+drivers/soc/ixp4xx/ixp4xx-qmgr.c:174:38: warning: dereference of noderef expression
+drivers/soc/ixp4xx/ixp4xx-qmgr.c:174:44: warning: dereference of noderef expression
+
+Use __raw_readl() here for consistency with the rest of the file.
+This should really get converted to some proper accessor, as the
+__raw functions are not meant to be used in drivers, but the driver
+has used these since the start, so for the moment, let's only fix
+the warning.
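+
+The difference, sketched on one of the registers touched below:
+
+  u32 __iomem *reg = &qmgr_regs->irqen[0];
+  u32 val;
+
+  val = *reg;              /* direct dereference: sparse "noderef" warning */
+  val = __raw_readl(reg);  /* what the rest of this file already uses      */
+  val = readl(reg);        /* a proper accessor, left as a future cleanup  */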
+
+Reported-by: kernel test robot <lkp@intel.com>
+Fixes: d4c9e9fc9751 ("IXP42x: Add QMgr support for IXP425 rev. A0 processors.")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/ixp4xx/ixp4xx-qmgr.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
++++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+@@ -145,12 +145,12 @@ static irqreturn_t qmgr_irq1_a0(int irq,
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+- en_bitmap = qmgr_regs->irqen[0];
++ en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+- src = qmgr_regs->irqsrc[i >> 3];
+- stat = qmgr_regs->stat1[i >> 3];
++ src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
++ stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+@@ -170,7 +170,8 @@ static irqreturn_t qmgr_irq2_a0(int irq,
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+- req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
++ req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
++ __raw_readl(&qmgr_regs->statne_h);
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
--- /dev/null
+From 8311ee2164c5cd1b63a601ea366f540eae89f10e Mon Sep 17 00:00:00 2001
+From: Dongliang Mu <mudongliangabcd@gmail.com>
+Date: Tue, 20 Jul 2021 18:01:16 +0800
+Subject: spi: meson-spicc: fix memory leak in meson_spicc_remove
+
+From: Dongliang Mu <mudongliangabcd@gmail.com>
+
+commit 8311ee2164c5cd1b63a601ea366f540eae89f10e upstream.
+
+In meson_spicc_probe, the error handling code cleans up the master by
+calling spi_master_put, but the remove function is missing this call.
+This leads to a memory leak of spicc->master.
+
+Reported-by: Dongliang Mu <mudongliangabcd@gmail.com>
+Fixes: 454fa271bc4e ("spi: Add Meson SPICC driver")
+Signed-off-by: Dongliang Mu <mudongliangabcd@gmail.com>
+Link: https://lore.kernel.org/r/20210720100116.1438974-1-mudongliangabcd@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-meson-spicc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/spi/spi-meson-spicc.c
++++ b/drivers/spi/spi-meson-spicc.c
+@@ -785,6 +785,8 @@ static int meson_spicc_remove(struct pla
+ clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
+
++ spi_master_put(spicc->master);
++
+ return 0;
+ }
+