--- /dev/null
+From 992bdddaabfba19bdc77c1c7a4977b2aa41ec891 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 13 Jun 2023 13:06:36 +0200
+Subject: backlight/bd6107: Compare against struct fb_info.device
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 992bdddaabfba19bdc77c1c7a4977b2aa41ec891 upstream.
+
+Struct bd6107_platform_data refers to a platform device within
+the Linux device hierarchy. The test in bd6107_backlight_check_fb()
+compares it against the fbdev device in struct fb_info.dev, which
+is different. Fix the test by comparing to struct fb_info.device.
+
+Fixes a bug in the backlight driver and prepares fbdev for making
+struct fb_info.dev optional.
+
+v2:
+ * move renames into separate patch (Javier, Sam, Michael)
+
+Fixes: 67b43e590415 ("backlight: Add ROHM BD6107 backlight driver")
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Cc: Lee Jones <lee@kernel.org>
+Cc: Daniel Thompson <daniel.thompson@linaro.org>
+Cc: Jingoo Han <jingoohan1@gmail.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v3.12+
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Reviewed-by: Daniel Thompson <daniel.thompson@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230613110953.24176-2-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/backlight/bd6107.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/backlight/bd6107.c
++++ b/drivers/video/backlight/bd6107.c
+@@ -104,7 +104,7 @@ static int bd6107_backlight_check_fb(str
+ {
+ struct bd6107 *bd = bl_get_data(backlight);
+
+- return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
++ return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->device;
+ }
+
+ static const struct backlight_ops bd6107_backlight_ops = {
--- /dev/null
+From 7b91d017f77c1bda56f27c2f4bbb70de7c6eca08 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 13 Jun 2023 13:06:38 +0200
+Subject: backlight/gpio_backlight: Compare against struct fb_info.device
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 7b91d017f77c1bda56f27c2f4bbb70de7c6eca08 upstream.
+
+Struct gpio_backlight_platform_data refers to a platform device within
+the Linux device hierarchy. The test in gpio_backlight_check_fb()
+compares it against the fbdev device in struct fb_info.dev, which
+is different. Fix the test by comparing to struct fb_info.device.
+
+Fixes a bug in the backlight driver and prepares fbdev for making
+struct fb_info.dev optional.
+
+v2:
+ * move renames into separate patch (Javier, Sam, Michael)
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 8b770e3c9824 ("backlight: Add GPIO-based backlight driver")
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Lee Jones <lee@kernel.org>
+Cc: Daniel Thompson <daniel.thompson@linaro.org>
+Cc: Jingoo Han <jingoohan1@gmail.com>
+Cc: linux-sh@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v3.12+
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Reviewed-by: Daniel Thompson <daniel.thompson@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230613110953.24176-4-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/backlight/gpio_backlight.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/backlight/gpio_backlight.c
++++ b/drivers/video/backlight/gpio_backlight.c
+@@ -35,7 +35,7 @@ static int gpio_backlight_check_fb(struc
+ {
+ struct gpio_backlight *gbl = bl_get_data(bl);
+
+- return gbl->fbdev == NULL || gbl->fbdev == info->dev;
++ return gbl->fbdev == NULL || gbl->fbdev == info->device;
+ }
+
+ static const struct backlight_ops gpio_backlight_ops = {
--- /dev/null
+From 1ca8819320fd84e7d95b04e7668efc5f9fe9fa5c Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 13 Jun 2023 13:06:40 +0200
+Subject: backlight/lv5207lp: Compare against struct fb_info.device
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 1ca8819320fd84e7d95b04e7668efc5f9fe9fa5c upstream.
+
+Struct lv5207lp_platform_data refers to a platform device within
+the Linux device hierarchy. The test in lv5207lp_backlight_check_fb()
+compares it against the fbdev device in struct fb_info.dev, which
+is different. Fix the test by comparing to struct fb_info.device.
+
+Fixes a bug in the backlight driver and prepares fbdev for making
+struct fb_info.dev optional.
+
+v2:
+ * move renames into separate patch (Javier, Sam, Michael)
+
+Fixes: 82e5c40d88f9 ("backlight: Add Sanyo LV5207LP backlight driver")
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Rich Felker <dalias@libc.org>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Lee Jones <lee@kernel.org>
+Cc: Daniel Thompson <daniel.thompson@linaro.org>
+Cc: Jingoo Han <jingoohan1@gmail.com>
+Cc: linux-sh@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v3.12+
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Reviewed-by: Daniel Thompson <daniel.thompson@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230613110953.24176-6-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/backlight/lv5207lp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/backlight/lv5207lp.c
++++ b/drivers/video/backlight/lv5207lp.c
+@@ -67,7 +67,7 @@ static int lv5207lp_backlight_check_fb(s
+ {
+ struct lv5207lp *lv = bl_get_data(backlight);
+
+- return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
++ return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->device;
+ }
+
+ static const struct backlight_ops lv5207lp_backlight_ops = {
--- /dev/null
+From 1611917f39bee1abfc01501238db8ac19649042d Mon Sep 17 00:00:00 2001
+From: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Date: Tue, 22 Aug 2023 12:31:09 -0400
+Subject: drm/amd/display: register edp_backlight_control() for DCN301
+
+From: Hamza Mahfooz <hamza.mahfooz@amd.com>
+
+commit 1611917f39bee1abfc01501238db8ac19649042d upstream.
+
+As made mention of in commit 099303e9a9bd ("drm/amd/display: eDP
+intermittent black screen during PnP"), we need to turn off the
+display's backlight before powering off an eDP display. Not doing so
+will result in undefined behaviour according to the eDP spec. So, set
+DCN301's edp_backlight_control() function pointer to
+dce110_edp_backlight_control().
+
+Cc: stable@vger.kernel.org
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2765
+Fixes: 9c75891feef0 ("drm/amd/display: rework recent update PHY state commit")
+Suggested-by: Swapnil Patel <swapnil.patel@amd.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs d
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
++ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
--- /dev/null
+From 6e13d6528be2f7e801af63c8153b87293f25d736 Mon Sep 17 00:00:00 2001
+From: Frank Li <Frank.Li@nxp.com>
+Date: Thu, 31 Aug 2023 10:13:24 -0400
+Subject: i3c: master: svc: fix probe failure when no i3c device exist
+
+From: Frank Li <Frank.Li@nxp.com>
+
+commit 6e13d6528be2f7e801af63c8153b87293f25d736 upstream.
+
+I3C masters are expected to support hot-join. This means at initialization
+time we might not yet have discovered any device, and this should not be
+treated as a fatal error.
+
+During the DAA procedure which happens at probe time, if no device has
+joined, all CCC will be NACKed (from a bus perspective). This leads to an
+early return with an error code which fails the probe of the master.
+
+Let's avoid this by just telling the core through an I3C_ERROR_M2
+return command code that no device was discovered, which is a valid
+situation. This way the master will no longer bail out and fail to probe
+for a wrong reason.
+
+Cc: stable@vger.kernel.org
+Fixes: dd3c52846d59 ("i3c: master: svc: Add Silvaco I3C master driver")
+Signed-off-by: Frank Li <Frank.Li@nxp.com>
+Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/r/20230831141324.2841525-1-Frank.Li@nxp.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i3c/master/svc-i3c-master.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -789,6 +789,10 @@ static int svc_i3c_master_do_daa_locked(
+ */
+ break;
+ } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
++ /* No I3C devices attached */
++ if (dev_nb == 0)
++ break;
++
+ /*
+ * A slave device nacked the address, this is
+ * allowed only once, DAA will be stopped and
+@@ -1263,11 +1267,17 @@ static int svc_i3c_master_send_ccc_cmd(s
+ {
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ bool broadcast = cmd->id < 0x80;
++ int ret;
+
+ if (broadcast)
+- return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
++ ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ else
+- return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++ ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++
++ if (ret)
++ cmd->err = I3C_ERROR_M2;
++
++ return ret;
+ }
+
+ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
--- /dev/null
+From dc314886cb3d0e4ab2858003e8de2917f8a3ccbd Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 9 Aug 2023 16:20:21 +0100
+Subject: io_uring: break iopolling on signal
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit dc314886cb3d0e4ab2858003e8de2917f8a3ccbd upstream.
+
+Don't keep spinning iopoll with a signal set. It'll eventually return
+back, e.g. by virtue of need_resched(), but it's not a nice user
+experience.
+
+Cc: stable@vger.kernel.org
+Fixes: def596e9557c9 ("io_uring: support for IO polling")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/eeba551e82cad12af30c3220125eb6cb244cc94c.1691594339.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1673,6 +1673,9 @@ static int io_iopoll_check(struct io_rin
+ break;
+ nr_events += ret;
+ ret = 0;
++
++ if (task_sigpending(current))
++ return -EINTR;
+ } while (nr_events < min && !need_resched());
+
+ return ret;
--- /dev/null
+From 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Thu, 7 Sep 2023 13:50:07 +0100
+Subject: io_uring: break out of iowq iopoll on teardown
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 upstream.
+
+io-wq will retry iopoll even when it failed with -EAGAIN. If that
+races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
+such workers might potentially infinitely spin retrying iopoll again and
+again and each time failing on some allocation / waiting / etc. Don't
+keep spinning if io-wq is dying.
+
+Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c | 10 ++++++++++
+ io_uring/io-wq.h | 1 +
+ io_uring/io_uring.c | 2 ++
+ 3 files changed, 13 insertions(+)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_
+ complete(&wq->worker_done);
+ }
+
++bool io_wq_worker_stopped(void)
++{
++ struct io_worker *worker = current->worker_private;
++
++ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++ return true;
++
++ return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *
+
+ int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1966,6 +1966,8 @@ fail:
+ if (!needs_poll) {
+ if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
+ break;
++ if (io_wq_worker_stopped())
++ break;
+ cond_resched();
+ continue;
+ }
--- /dev/null
+From 569f5308e54352a12181cc0185f848024c5443e8 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 9 Aug 2023 13:22:16 +0100
+Subject: io_uring: fix false positive KASAN warnings
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 569f5308e54352a12181cc0185f848024c5443e8 upstream.
+
+io_req_local_work_add() peeks into the work list, which can be executed
+in the meanwhile. It's completely fine without KASAN as we're in an RCU
+read section and it's SLAB_TYPESAFE_BY_RCU. With KASAN though it may
+trigger a false positive warning because internal io_uring caches are
+sanitised.
+
+Remove sanitisation from the io_uring request cache for now.
+
+Cc: stable@vger.kernel.org
+Fixes: 8751d15426a31 ("io_uring: reduce scheduling due to tw")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/c6fbf7a82a341e66a0007c76eefd9d57f2d3ba51.1691541473.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io_uring.c | 1 -
+ io_uring/io_uring.h | 1 -
+ 2 files changed, 2 deletions(-)
+
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -229,7 +229,6 @@ static inline void req_fail_link_node(st
+ static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
+ {
+ wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+- kasan_poison_object_data(req_cachep, req);
+ }
+
+ static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -354,7 +354,6 @@ static inline struct io_kiocb *io_extrac
+ struct io_kiocb *req;
+
+ req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+- kasan_unpoison_object_data(req_cachep, req);
+ wq_stack_extract(&ctx->submit_state.free_list);
+ return req;
+ }
--- /dev/null
+From 1bfed23349716a7811645336a7ce42c4b8f250bc Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 11 Aug 2023 13:53:41 +0100
+Subject: io_uring/net: don't overflow multishot accept
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 1bfed23349716a7811645336a7ce42c4b8f250bc upstream.
+
+Don't allow overflowing multishot accept CQEs; we want to limit
+the growth of the overflow list.
+
+Cc: stable@vger.kernel.org
+Fixes: 4e86a2c980137 ("io_uring: implement multishot mode for accept")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/7d0d749649244873772623dd7747966f516fe6e2.1691757663.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1367,7 +1367,7 @@ retry:
+ if (ret < 0)
+ return ret;
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+- IORING_CQE_F_MORE, true))
++ IORING_CQE_F_MORE, false))
+ goto retry;
+
+ return -ECANCELED;
--- /dev/null
+From b2e74db55dd93d6db22a813c9a775b5dbf87c560 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 11 Aug 2023 13:53:42 +0100
+Subject: io_uring/net: don't overflow multishot recv
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit b2e74db55dd93d6db22a813c9a775b5dbf87c560 upstream.
+
+Don't allow overflowing multishot recv CQEs; it might get out of
+hand, hurt performance, and in the worst case scenario OOM the task.
+
+Cc: stable@vger.kernel.org
+Fixes: b3fdea6ecb55c ("io_uring: multishot recv")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/0b295634e8f1b71aa764c984608c22d85f88f75c.1691757663.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/net.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -642,7 +642,7 @@ static inline bool io_recv_finish(struct
+
+ if (!mshot_finished) {
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+- *ret, cflags | IORING_CQE_F_MORE, true)) {
++ *ret, cflags | IORING_CQE_F_MORE, false)) {
+ io_recv_prep_retry(req);
+ /* Known not-empty or unknown state, retry */
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
--- /dev/null
+From ebdfefc09c6de7897962769bd3e63a2ff443ebf5 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 13 Aug 2023 11:05:36 -0600
+Subject: io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit ebdfefc09c6de7897962769bd3e63a2ff443ebf5 upstream.
+
+If we setup the ring with SQPOLL, then that polling thread has its
+own io-wq setup. This means that if the application uses
+IORING_REGISTER_IOWQ_AFF to set the io-wq affinity, we should not be
+setting it for the invoking task, but rather the sqpoll task.
+
+Add an sqpoll helper that parks the thread and updates the affinity,
+and use that one if we're using SQPOLL.
+
+Fixes: fe76421d1da1 ("io_uring: allow user configurable IO thread CPU affinity")
+Cc: stable@vger.kernel.org # 5.10+
+Link: https://github.com/axboe/liburing/discussions/884
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/io-wq.c | 9 ++++++---
+ io_uring/io-wq.h | 2 +-
+ io_uring/io_uring.c | 29 ++++++++++++++++++-----------
+ io_uring/sqpoll.c | 15 +++++++++++++++
+ io_uring/sqpoll.h | 1 +
+ 5 files changed, 41 insertions(+), 15 deletions(-)
+
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -1285,13 +1285,16 @@ static int io_wq_cpu_offline(unsigned in
+ return __io_wq_cpu_online(wq, cpu, false);
+ }
+
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
+ {
++ if (!tctx || !tctx->io_wq)
++ return -EINVAL;
++
+ rcu_read_lock();
+ if (mask)
+- cpumask_copy(wq->cpu_mask, mask);
++ cpumask_copy(tctx->io_wq->cpu_mask, mask);
+ else
+- cpumask_copy(wq->cpu_mask, cpu_possible_mask);
++ cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
+ rcu_read_unlock();
+
+ return 0;
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -50,7 +50,7 @@ void io_wq_put_and_exit(struct io_wq *wq
+ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+ void io_wq_hash_work(struct io_wq_work *work, void *val);
+
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
+
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -4201,16 +4201,28 @@ static int io_register_enable_rings(stru
+ return 0;
+ }
+
++static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
++ cpumask_var_t new_mask)
++{
++ int ret;
++
++ if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
++ ret = io_wq_cpu_affinity(current->io_uring, new_mask);
++ } else {
++ mutex_unlock(&ctx->uring_lock);
++ ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
++ mutex_lock(&ctx->uring_lock);
++ }
++
++ return ret;
++}
++
+ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ void __user *arg, unsigned len)
+ {
+- struct io_uring_task *tctx = current->io_uring;
+ cpumask_var_t new_mask;
+ int ret;
+
+- if (!tctx || !tctx->io_wq)
+- return -EINVAL;
+-
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+@@ -4231,19 +4243,14 @@ static __cold int io_register_iowq_aff(s
+ return -EFAULT;
+ }
+
+- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
++ ret = __io_register_iowq_aff(ctx, new_mask);
+ free_cpumask_var(new_mask);
+ return ret;
+ }
+
+ static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
+ {
+- struct io_uring_task *tctx = current->io_uring;
+-
+- if (!tctx || !tctx->io_wq)
+- return -EINVAL;
+-
+- return io_wq_cpu_affinity(tctx->io_wq, NULL);
++ return __io_register_iowq_aff(ctx, NULL);
+ }
+
+ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -421,3 +421,18 @@ err:
+ io_sq_thread_finish(ctx);
+ return ret;
+ }
++
++__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
++ cpumask_var_t mask)
++{
++ struct io_sq_data *sqd = ctx->sq_data;
++ int ret = -EINVAL;
++
++ if (sqd) {
++ io_sq_thread_park(sqd);
++ ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
++ io_sq_thread_unpark(sqd);
++ }
++
++ return ret;
++}
+--- a/io_uring/sqpoll.h
++++ b/io_uring/sqpoll.h
+@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data
+ void io_sq_thread_unpark(struct io_sq_data *sqd);
+ void io_put_sq_data(struct io_sq_data *sqd);
+ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
++int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
--- /dev/null
+From 303be4b33562a5b689261ced1616bf16ad49efa7 Mon Sep 17 00:00:00 2001
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+Date: Wed, 6 Sep 2023 22:53:09 +0800
+Subject: LoongArch: mm: Add p?d_leaf() definitions
+
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+
+commit 303be4b33562a5b689261ced1616bf16ad49efa7 upstream.
+
+When I do LTP test, LTP test case ksm06 caused panic at
+ break_ksm_pmd_entry
+ -> pmd_leaf (Huge page table but False)
+ -> pte_present (panic)
+
+The reason is that pmd_leaf() is not defined, so like commit 501b81046701
+("mips: mm: add p?d_leaf() definitions") add p?d_leaf() definition for
+LoongArch.
+
+Fixes: 09cfefb7fa70 ("LoongArch: Add memory management")
+Cc: stable@vger.kernel.org
+Acked-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/pgtable.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/loongarch/include/asm/pgtable.h
++++ b/arch/loongarch/include/asm/pgtable.h
+@@ -593,6 +593,9 @@ static inline long pmd_protnone(pmd_t pm
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+
++#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
++#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
++
+ /*
+ * We provide our own get_unmapped area to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
--- /dev/null
+From 081690e941188acfad41b8dbde2112029a2aa206 Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Wed, 6 Sep 2023 12:08:16 -0700
+Subject: powercap: intel_rapl: Fix invalid setting of Power Limit 4
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit 081690e941188acfad41b8dbde2112029a2aa206 upstream.
+
+System runs at minimum performance, once powercap RAPL package domain
+enabled flag is changed from 1 to 0 to 1.
+
+Setting RAPL package domain enabled flag to 0, results in setting of
+power limit 4 (PL4) MSR 0x601 to 0. This implies disabling PL4 limit.
+The PL4 limit controls the peak power. So setting 0, results in some
+undesirable performance, which depends on hardware implementation.
+
+Even worse, when the enabled flag is set to 1 again. This will set PL4
+MSR value to 0x01, which means reduce peak power to 0.125W. This will
+force system to run at the lowest possible performance on every PL4
+supported system.
+
+Setting enabled flag should only affect the "enable" bit, not other
+bits. Here it is changing power limit.
+
+This is caused by a change which assumes that there is an enable bit in
+the PL4 MSR like other power limits. Although PL4 enable/disable bit is
+present with TPMI RAPL interface, it is not present with the MSR
+interface.
+
+There is a rapl_primitive_info defined for the nonexistent PL4 enable bit,
+and then it is used with the commit 9050a9cd5e4c ("powercap: intel_rapl:
+Cleanup Power Limits support") to enable PL4. This is wrong, hence remove
+this rapl primitive for PL4. Also in the function
+rapl_detect_powerlimit(), PL_ENABLE is used to check for the presence of
+power limits. Replace PL_ENABLE with PL_LIMIT, as PL_LIMIT must be
+present. Without this change, PL4 controls will not be available in the
+sysfs once rapl primitive for PL4 is removed.
+
+Fixes: 9050a9cd5e4c ("powercap: intel_rapl: Cleanup Power Limits support")
+Suggested-by: Zhang Rui <rui.zhang@intel.com>
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Tested-by: Sumeet Pawnikar <sumeet.r.pawnikar@intel.com>
+Cc: 6.5+ <stable@vger.kernel.org> # 6.5+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/powercap/intel_rapl_common.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index 5c2e6d5eea2a..40a2cc649c79 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -658,8 +658,6 @@ static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ [PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+- [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
+- RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
+ [TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
+ [TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+@@ -1458,7 +1456,7 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
+ }
+ }
+
+- if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
++ if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
+ rd->rpl[i].name = NULL;
+ }
+ }
+--
+2.42.0
+
--- /dev/null
+From f6834c8c59a8e977a6f6e4f96c5d28dfa5db8430 Mon Sep 17 00:00:00 2001
+From: Naveen N Rao <naveen@kernel.org>
+Date: Mon, 19 Jun 2023 15:17:19 +0530
+Subject: powerpc/ftrace: Fix dropping weak symbols with older toolchains
+
+From: Naveen N Rao <naveen@kernel.org>
+
+commit f6834c8c59a8e977a6f6e4f96c5d28dfa5db8430 upstream.
+
+The minimum level of gcc supported for building the kernel is v5.1.
+v5.x releases of gcc emitted a three instruction sequence for
+-mprofile-kernel:
+ mflr r0
+ std r0, 16(r1)
+ bl _mcount
+
+It is only with the v6.x releases that gcc started emitting the two
+instruction sequence for -mprofile-kernel, omitting the second store
+instruction.
+
+With the older three instruction sequence, the actual ftrace location
+can be the 5th instruction into a function. Update the allowed offset
+for ftrace location from 12 to 16 to accommodate the same.
+
+Cc: stable@vger.kernel.org
+Fixes: 7af82ff90a2b06 ("powerpc/ftrace: Ignore weak functions")
+Signed-off-by: Naveen N Rao <naveen@kernel.org>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/7b265908a9461e38fc756ef9b569703860a80621.1687166935.git.naveen@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/ftrace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/ftrace.h
++++ b/arch/powerpc/include/asm/ftrace.h
+@@ -12,7 +12,7 @@
+
+ /* Ignore unused weak functions which will have larger offsets */
+ #ifdef CONFIG_MPROFILE_KERNEL
+-#define FTRACE_MCOUNT_MAX_OFFSET 12
++#define FTRACE_MCOUNT_MAX_OFFSET 16
+ #elif defined(CONFIG_PPC32)
+ #define FTRACE_MCOUNT_MAX_OFFSET 8
+ #endif
arm-omap2-fix-warray-bounds-warning-in-_pwrdm_state_switch.patch
riscv-move-create_tmp_mapping-to-init-sections.patch
riscv-mark-kasan-tmp-page-tables-variables-as-static.patch
+xarray-do-not-return-sibling-entries-from-xa_load.patch
+io_uring-fix-false-positive-kasan-warnings.patch
+io_uring-break-iopolling-on-signal.patch
+io_uring-sqpoll-fix-io-wq-affinity-when-ioring_setup_sqpoll-is-used.patch
+io_uring-net-don-t-overflow-multishot-recv.patch
+io_uring-net-don-t-overflow-multishot-accept.patch
+io_uring-break-out-of-iowq-iopoll-on-teardown.patch
+backlight-gpio_backlight-compare-against-struct-fb_info.device.patch
+backlight-bd6107-compare-against-struct-fb_info.device.patch
+backlight-lv5207lp-compare-against-struct-fb_info.device.patch
+drm-amd-display-register-edp_backlight_control-for-dcn301.patch
+xtensa-pmu-fix-base-address-for-the-newer-hardware.patch
+loongarch-mm-add-p-d_leaf-definitions.patch
+powercap-intel_rapl-fix-invalid-setting-of-power-limit-4.patch
+powerpc-ftrace-fix-dropping-weak-symbols-with-older-toolchains.patch
+i3c-master-svc-fix-probe-failure-when-no-i3c-device-exist.patch
--- /dev/null
+From cbc02854331edc6dc22d8b77b6e22e38ebc7dd51 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Wed, 26 Jul 2023 22:58:17 -0400
+Subject: XArray: Do not return sibling entries from xa_load()
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit cbc02854331edc6dc22d8b77b6e22e38ebc7dd51 upstream.
+
+It is possible for xa_load() to observe a sibling entry pointing to
+another sibling entry. An example:
+
+Thread A: Thread B:
+ xa_store_range(xa, entry, 188, 191, gfp);
+xa_load(xa, 191);
+entry = xa_entry(xa, node, 63);
+[entry is a sibling of 188]
+ xa_store_range(xa, entry, 184, 191, gfp);
+if (xa_is_sibling(entry))
+offset = xa_to_sibling(entry);
+entry = xa_entry(xas->xa, node, offset);
+[entry is now a sibling of 184]
+
+It is sufficient to go around this loop until we hit a non-sibling entry.
+Sibling entries always point earlier in the node, so we are guaranteed
+to terminate this search.
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Fixes: 6b24ca4a1a8d ("mm: Use multi-index entries in the page cache")
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/xarray.c | 2 -
+ tools/testing/radix-tree/multiorder.c | 68 +++++++++++++++++++++++++++++++++-
+ 2 files changed, 67 insertions(+), 3 deletions(-)
+
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state
+ void *entry = xa_entry(xas->xa, node, offset);
+
+ xas->xa_node = node;
+- if (xa_is_sibling(entry)) {
++ while (xa_is_sibling(entry)) {
+ offset = xa_to_sibling(entry);
+ entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+--- a/tools/testing/radix-tree/multiorder.c
++++ b/tools/testing/radix-tree/multiorder.c
+@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct
+ item_kill_tree(xa);
+ }
+
+-bool stop_iteration = false;
++bool stop_iteration;
+
+ static void *creator_func(void *ptr)
+ {
+@@ -201,6 +201,7 @@ static void multiorder_iteration_race(st
+ pthread_t worker_thread[num_threads];
+ int i;
+
++ stop_iteration = false;
+ pthread_create(&worker_thread[0], NULL, &creator_func, xa);
+ for (i = 1; i < num_threads; i++)
+ pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
+@@ -211,6 +212,61 @@ static void multiorder_iteration_race(st
+ item_kill_tree(xa);
+ }
+
++static void *load_creator(void *ptr)
++{
++ /* 'order' is set up to ensure we have sibling entries */
++ unsigned int order;
++ struct radix_tree_root *tree = ptr;
++ int i;
++
++ rcu_register_thread();
++ item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
++ item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
++ for (i = 0; i < 10000; i++) {
++ for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
++ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
++ (1 << order);
++ item_insert_order(tree, index, order);
++ item_delete_rcu(tree, index);
++ }
++ }
++ rcu_unregister_thread();
++
++ stop_iteration = true;
++ return NULL;
++}
++
++static void *load_worker(void *ptr)
++{
++ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
++
++ rcu_register_thread();
++ while (!stop_iteration) {
++ struct item *item = xa_load(ptr, index);
++ assert(!xa_is_internal(item));
++ }
++ rcu_unregister_thread();
++
++ return NULL;
++}
++
++static void load_race(struct xarray *xa)
++{
++ const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
++ pthread_t worker_thread[num_threads];
++ int i;
++
++ stop_iteration = false;
++ pthread_create(&worker_thread[0], NULL, &load_creator, xa);
++ for (i = 1; i < num_threads; i++)
++ pthread_create(&worker_thread[i], NULL, &load_worker, xa);
++
++ for (i = 0; i < num_threads; i++)
++ pthread_join(worker_thread[i], NULL);
++
++ item_kill_tree(xa);
++}
++
+ static DEFINE_XARRAY(array);
+
+ void multiorder_checks(void)
+@@ -218,12 +274,20 @@ void multiorder_checks(void)
+ multiorder_iteration(&array);
+ multiorder_tagged_iteration(&array);
+ multiorder_iteration_race(&array);
++ load_race(&array);
+
+ radix_tree_cpu_dead(0);
+ }
+
+-int __weak main(void)
++int __weak main(int argc, char **argv)
+ {
++ int opt;
++
++ while ((opt = getopt(argc, argv, "ls:v")) != -1) {
++ if (opt == 'v')
++ test_verbose++;
++ }
++
+ rcu_register_thread();
+ radix_tree_init();
+ multiorder_checks();
--- /dev/null
+From 687eb3c42f4ad81e7c947c50e2d865f692064291 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Mon, 24 Jul 2023 00:58:24 -0700
+Subject: xtensa: PMU: fix base address for the newer hardware
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 687eb3c42f4ad81e7c947c50e2d865f692064291 upstream.
+
+With introduction of ERI access control in RG.0 base address of the PMU
+unit registers has changed. Add support for the new PMU configuration.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/xtensa/include/asm/core.h | 9 +++++++++
+ arch/xtensa/kernel/perf_event.c | 17 +++++++++++++----
+ 2 files changed, 22 insertions(+), 4 deletions(-)
+
+--- a/arch/xtensa/include/asm/core.h
++++ b/arch/xtensa/include/asm/core.h
+@@ -52,4 +52,13 @@
+ #define XTENSA_STACK_ALIGNMENT 16
+ #endif
+
++#ifndef XCHAL_HW_MIN_VERSION
++#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
++#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
++ XCHAL_HW_MIN_VERSION_MINOR)
++#else
++#define XCHAL_HW_MIN_VERSION 0
++#endif
++#endif
++
+ #endif
+--- a/arch/xtensa/kernel/perf_event.c
++++ b/arch/xtensa/kernel/perf_event.c
+@@ -13,17 +13,26 @@
+ #include <linux/perf_event.h>
+ #include <linux/platform_device.h>
+
++#include <asm/core.h>
+ #include <asm/processor.h>
+ #include <asm/stacktrace.h>
+
++#define XTENSA_HWVERSION_RG_2015_0 260000
++
++#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
++#define XTENSA_PMU_ERI_BASE 0x00101000
++#else
++#define XTENSA_PMU_ERI_BASE 0x00001000
++#endif
++
+ /* Global control/status for all perf counters */
+-#define XTENSA_PMU_PMG 0x1000
++#define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE
+ /* Perf counter values */
+-#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4)
++#define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
+ /* Perf counter control registers */
+-#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4)
++#define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
+ /* Perf counter status registers */
+-#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4)
++#define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
+
+ #define XTENSA_PMU_PMG_PMEN 0x1
+