--- /dev/null
+From 4b8247233057991638c4e662f16bdd0524738aa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Dec 2022 11:39:27 +0100
+Subject: can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit f006229135b7debf4037adb1eb93e358559593db ]
+
+Debian's gcc-13 [1] throws the following error in
+kvaser_usb_hydra_cmd_size():
+
+[1] gcc version 13.0.0 20221214 (experimental) [master r13-4693-g512098a3316] (Debian 13-20221214-1)
+
+| drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c:502:65: error:
+| array subscript ‘struct kvaser_cmd_ext[0]’ is partly outside array
+| bounds of ‘unsigned char[32]’ [-Werror=array-bounds=]
+| 502 | ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+
+kvaser_usb_hydra_cmd_size() returns the size of a given command. The
+size depends on the command number (cmd->header.cmd_no). For extended
+commands (cmd->header.cmd_no == CMD_EXTENDED) the code shown above is
+executed.
+
+Help gcc to recognize that this code path is not taken in all cases,
+by calling kvaser_usb_hydra_cmd_size() directly after assigning the
+command number.
+
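+In short, the fix computes the command length right after the command
+number is assigned and reuses it when sending (sketch of the resulting
+pattern; see the diff below for the real code):
+
+|  cmd->header.cmd_no = cmd_no;
+|  cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+|  ...
+|  err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+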
+Fixes: aec5fb2268b7 ("can: kvaser_usb: Add support for Kvaser USB hydra family")
+Cc: Jimmy Assarsson <extja@kvaser.com>
+Cc: Anssi Hannula <anssi.hannula@bitwise.fi>
+Link: https://lore.kernel.org/all/20221219110104.1073881-1-mkl@pengutronix.de
+Tested-by: Jimmy Assarsson <extja@kvaser.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 33 ++++++++++++++-----
+ 1 file changed, 24 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index f688124d6d669..ef341c4254fc8 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -545,6 +545,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ u8 cmd_no, int channel)
+ {
+ struct kvaser_cmd *cmd;
++ size_t cmd_len;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -552,6 +553,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ if (channel < 0) {
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+@@ -568,7 +570,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ if (err)
+ goto end;
+
+@@ -584,6 +586,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ {
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb *dev = priv->dev;
++ size_t cmd_len;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+@@ -591,14 +594,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd_async(priv, cmd,
+- kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
+ if (err)
+ kfree(cmd);
+
+@@ -742,6 +745,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ {
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
++ size_t cmd_len;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+@@ -753,13 +757,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ if (err)
+ goto end;
+
+@@ -1578,6 +1583,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ struct kvaser_cmd *cmd;
++ size_t cmd_len;
+ int err;
+
+ if (!hydra)
+@@ -1588,6 +1594,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+@@ -1597,7 +1604,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+
+ reinit_completion(&priv->get_busparams_comp);
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ if (err)
+ return err;
+
+@@ -1624,6 +1631,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
++ size_t cmd_len;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1631,6 +1639,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ sizeof(cmd->set_busparams_req.busparams_nominal));
+
+@@ -1639,7 +1648,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+
+ kfree(cmd);
+
+@@ -1652,6 +1661,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
++ size_t cmd_len;
+ int err;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1659,6 +1669,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ sizeof(cmd->set_busparams_req.busparams_data));
+
+@@ -1676,7 +1687,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+
+ kfree(cmd);
+
+@@ -1804,6 +1815,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ {
+ struct kvaser_cmd *cmd;
++ size_t cmd_len;
+ int err;
+ u32 flags;
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+@@ -1813,6 +1825,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ cmd->sw_detail_req.use_ext_cmd = 1;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+@@ -1820,7 +1833,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ if (err)
+ goto end;
+
+@@ -1938,6 +1951,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ {
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
++ size_t cmd_len;
+ int err;
+
+ if ((priv->can.ctrlmode &
+@@ -1953,6 +1967,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
++ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+@@ -1962,7 +1977,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ else
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+
+- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ kfree(cmd);
+
+ return err;
+--
+2.39.0
+
--- /dev/null
+From cbeefb22c37836b2fffa1faf2010bb490883de48 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 14:48:32 +0800
+Subject: clk: mxl: Add option to override gate clks
+
+From: Rahul Tanwar <rtanwar@maxlinear.com>
+
+[ Upstream commit a5d49bd369b8588c0ee9d4d0a2c0160558a3ab69 ]
+
+In MxL's LGM SoC, gate clocks can be controlled either from the CGU clk
+driver, i.e. this driver, or directly from the power management
+driver/daemon, depending on the power policy/profile requirements of
+the end product.
+
+To support such use cases, provide an option to override gate clk
+enable/disable by adding a flag, GATE_CLK_HW, which selects whether a
+gate clk is controlled by HW, i.e. this driver, or left to the power
+profiles instead.
+
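+With the flag in place, the gate registration branch becomes roughly
+(sketch; see the diff below):
+
+  case CLK_TYPE_GATE:
+          if (list->gate_flags & GATE_CLK_HW)
+                  hw = lgm_clk_register_gate(ctx, list);
+          else
+                  hw = NULL;  /* gate left to the PM driver/daemon */
+          break;
+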
+Reviewed-by: Yi xin Zhu <yzhu@maxlinear.com>
+Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
+Link: https://lore.kernel.org/r/bdc9c89317b5d338a6c4f1d49386b696e947a672.1665642720.git.rtanwar@maxlinear.com
+[sboyd@kernel.org: Add braces on many line if-else]
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Stable-dep-of: 106ef3bda210 ("clk: mxl: Fix a clk entry by adding relevant flags")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/x86/clk-cgu.c | 16 +++++++++++++++-
+ drivers/clk/x86/clk-cgu.h | 1 +
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
+index 1f7e93de67bc0..4278a687076c9 100644
+--- a/drivers/clk/x86/clk-cgu.c
++++ b/drivers/clk/x86/clk-cgu.c
+@@ -354,8 +354,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ hw = lgm_clk_register_fixed_factor(ctx, list);
+ break;
+ case CLK_TYPE_GATE:
+- hw = lgm_clk_register_gate(ctx, list);
++ if (list->gate_flags & GATE_CLK_HW) {
++ hw = lgm_clk_register_gate(ctx, list);
++ } else {
++ /*
++ * GATE_CLKs can be controlled either from
++ * CGU clk driver i.e. this driver or directly
++ * from power management driver/daemon. It is
++ * dependent on the power policy/profile requirements
++ * of the end product. To override control of gate
++ * clks from this driver, provide NULL for this index
++ * of gate clk provider.
++ */
++ hw = NULL;
++ }
+ break;
++
+ default:
+ dev_err(ctx->dev, "invalid clk type\n");
+ return -EINVAL;
+diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
+index 0aa0f35d63a0b..73ce84345f81e 100644
+--- a/drivers/clk/x86/clk-cgu.h
++++ b/drivers/clk/x86/clk-cgu.h
+@@ -197,6 +197,7 @@ struct lgm_clk_branch {
+ /* clock flags definition */
+ #define CLOCK_FLAG_VAL_INIT BIT(16)
+ #define MUX_CLK_SW BIT(17)
++#define GATE_CLK_HW BIT(18)
+
+ #define LGM_MUX(_id, _name, _pdata, _f, _reg, \
+ _shift, _width, _cf, _v) \
+--
+2.39.0
+
--- /dev/null
+From 28a1119baf19fafae7c898bb38b45e9a74d0fd17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 14:48:33 +0800
+Subject: clk: mxl: Fix a clk entry by adding relevant flags
+
+From: Rahul Tanwar <rtanwar@maxlinear.com>
+
+[ Upstream commit 106ef3bda21006fe37b62c85931230a6355d78d3 ]
+
+One of the clock entries, the "dcl" clk, has two HW limitations: its
+rate can only be changed by changing its parent clk's rate, and the HW
+does not support enable/disable for this clk.
+
+Handle both limitations by adding the relevant flags: the standard flag
+CLK_SET_RATE_PARENT to handle the rate change and the driver-internal
+flag DIV_CLK_NO_MASK to handle enable/disable.
+
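+The "dcl" branch entry then reads (sketch; see the diff below):
+
+  LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR,
+          25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div),
+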
+Fixes: d058fd9e8984 ("clk: intel: Add CGU clock driver for a new SoC")
+Reviewed-by: Yi xin Zhu <yzhu@maxlinear.com>
+Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
+Link: https://lore.kernel.org/r/a4770e7225f8a0c03c8ab2ba80434a4e8e9afb17.1665642720.git.rtanwar@maxlinear.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/x86/clk-cgu.c | 5 +++--
+ drivers/clk/x86/clk-cgu.h | 1 +
+ drivers/clk/x86/clk-lgm.c | 4 ++--
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
+index 4278a687076c9..89b53f280aee0 100644
+--- a/drivers/clk/x86/clk-cgu.c
++++ b/drivers/clk/x86/clk-cgu.c
+@@ -164,8 +164,9 @@ static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
+ {
+ struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
+
+- lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
+- div->width_gate, enable);
++ if (div->flags != DIV_CLK_NO_MASK)
++ lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
++ div->width_gate, enable);
+ return 0;
+ }
+
+diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
+index 73ce84345f81e..bcaf8aec94e5d 100644
+--- a/drivers/clk/x86/clk-cgu.h
++++ b/drivers/clk/x86/clk-cgu.h
+@@ -198,6 +198,7 @@ struct lgm_clk_branch {
+ #define CLOCK_FLAG_VAL_INIT BIT(16)
+ #define MUX_CLK_SW BIT(17)
+ #define GATE_CLK_HW BIT(18)
++#define DIV_CLK_NO_MASK BIT(19)
+
+ #define LGM_MUX(_id, _name, _pdata, _f, _reg, \
+ _shift, _width, _cf, _v) \
+diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
+index e312af42e97ae..4de77b2c750d3 100644
+--- a/drivers/clk/x86/clk-lgm.c
++++ b/drivers/clk/x86/clk-lgm.c
+@@ -255,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = {
+ LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
+ 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
+ LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
+- LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
+- 25, 3, 0, 0, 0, 0, dcl_div),
++ LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR,
++ 25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div),
+ LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
+ 0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
+--
+2.39.0
+
--- /dev/null
+From cae26f56ddd50fde09ea3b86b0b943f6ea67f80e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 14:48:31 +0800
+Subject: clk: mxl: Remove redundant spinlocks
+
+From: Rahul Tanwar <rtanwar@maxlinear.com>
+
+[ Upstream commit eaabee88a88a26b108be8d120fc072dfaf462cef ]
+
+Patch 1/4 of this series switches from direct readl/writel based
+register access to regmap based register access. Instead of direct
+readl/writel, regmap APIs are used to read, write & read-modify-write
+the clk registers. The regmap APIs already use their own spinlocks to
+serialize register accesses across multiple cores, which makes the
+additional driver spinlocks redundant.
+
+Hence, remove the redundant spinlocks from the driver in this patch 2/4.
+
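+A typical accessor then relies solely on regmap's internal locking,
+e.g. (simplified sketch; see the diff below):
+
+  static int lgm_pll_is_enabled(struct clk_hw *hw)
+  {
+          struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+
+          /* regmap serializes the register access internally */
+          return lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
+  }
+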
+Reviewed-by: Yi xin Zhu <yzhu@maxlinear.com>
+Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
+Link: https://lore.kernel.org/r/a8a02c8773b88924503a9fdaacd37dd2e6488bf3.1665642720.git.rtanwar@maxlinear.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Stable-dep-of: 106ef3bda210 ("clk: mxl: Fix a clk entry by adding relevant flags")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/x86/clk-cgu-pll.c | 13 ------
+ drivers/clk/x86/clk-cgu.c | 80 ++++-------------------------------
+ drivers/clk/x86/clk-cgu.h | 6 ---
+ drivers/clk/x86/clk-lgm.c | 1 -
+ 4 files changed, 9 insertions(+), 91 deletions(-)
+
+diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
+index c83083affe88e..409dbf55f4cae 100644
+--- a/drivers/clk/x86/clk-cgu-pll.c
++++ b/drivers/clk/x86/clk-cgu-pll.c
+@@ -41,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+ {
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned int div, mult, frac;
+- unsigned long flags;
+
+- spin_lock_irqsave(&pll->lock, flags);
+ mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
+ div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
+ frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
+- spin_unlock_irqrestore(&pll->lock, flags);
+
+ if (pll->type == TYPE_LJPLL)
+ div *= 4;
+@@ -58,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+ static int lgm_pll_is_enabled(struct clk_hw *hw)
+ {
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+- unsigned long flags;
+ unsigned int ret;
+
+- spin_lock_irqsave(&pll->lock, flags);
+ ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
+- spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+ }
+@@ -71,16 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw)
+ static int lgm_pll_enable(struct clk_hw *hw)
+ {
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+- unsigned long flags;
+ u32 val;
+ int ret;
+
+- spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
+ ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
+ val, (val & 0x1), 1, 100);
+
+- spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+ }
+@@ -88,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw)
+ static void lgm_pll_disable(struct clk_hw *hw)
+ {
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+- unsigned long flags;
+
+- spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
+- spin_unlock_irqrestore(&pll->lock, flags);
+ }
+
+ static const struct clk_ops lgm_pll_ops = {
+@@ -123,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
+ return ERR_PTR(-ENOMEM);
+
+ pll->membase = ctx->membase;
+- pll->lock = ctx->lock;
+ pll->reg = list->reg;
+ pll->flags = list->flags;
+ pll->type = list->type;
+diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
+index f5f30a18f4869..1f7e93de67bc0 100644
+--- a/drivers/clk/x86/clk-cgu.c
++++ b/drivers/clk/x86/clk-cgu.c
+@@ -25,14 +25,10 @@
+ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+ {
+- unsigned long flags;
+
+- if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+- spin_lock_irqsave(&ctx->lock, flags);
++ if (list->div_flags & CLOCK_FLAG_VAL_INIT)
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+- spin_unlock_irqrestore(&ctx->lock, flags);
+- }
+
+ return clk_hw_register_fixed_rate(NULL, list->name,
+ list->parent_data[0].name,
+@@ -42,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
+ {
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+- unsigned long flags;
+ u32 val;
+
+- spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ val = mux->reg;
+ else
+ val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width);
+- spin_unlock_irqrestore(&mux->lock, flags);
+ return clk_mux_val_to_index(hw, NULL, mux->flags, val);
+ }
+
+ static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ {
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+- unsigned long flags;
+ u32 val;
+
+ val = clk_mux_index_to_val(NULL, mux->flags, index);
+- spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ mux->reg = val;
+ else
+ lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width, val);
+- spin_unlock_irqrestore(&mux->lock, flags);
+
+ return 0;
+ }
+@@ -91,7 +81,7 @@ static struct clk_hw *
+ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+ {
+- unsigned long flags, cflags = list->mux_flags;
++ unsigned long cflags = list->mux_flags;
+ struct device *dev = ctx->dev;
+ u8 shift = list->mux_shift;
+ u8 width = list->mux_width;
+@@ -112,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ init.num_parents = list->num_parents;
+
+ mux->membase = ctx->membase;
+- mux->lock = ctx->lock;
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->width = width;
+@@ -124,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ if (ret)
+ return ERR_PTR(ret);
+
+- if (cflags & CLOCK_FLAG_VAL_INIT) {
+- spin_lock_irqsave(&mux->lock, flags);
++ if (cflags & CLOCK_FLAG_VAL_INIT)
+ lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
+- spin_unlock_irqrestore(&mux->lock, flags);
+- }
+
+ return hw;
+ }
+@@ -137,13 +123,10 @@ static unsigned long
+ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ {
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+- unsigned long flags;
+ unsigned int val;
+
+- spin_lock_irqsave(÷r->lock, flags);
+ val = lgm_get_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width);
+- spin_unlock_irqrestore(÷r->lock, flags);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+@@ -164,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+ {
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+- unsigned long flags;
+ int value;
+
+ value = divider_get_val(rate, prate, divider->table,
+@@ -172,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ if (value < 0)
+ return value;
+
+- spin_lock_irqsave(÷r->lock, flags);
+ lgm_set_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width, value);
+- spin_unlock_irqrestore(÷r->lock, flags);
+
+ return 0;
+ }
+@@ -183,12 +163,9 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
+ {
+ struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
+- unsigned long flags;
+
+- spin_lock_irqsave(&div->lock, flags);
+ lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
+ div->width_gate, enable);
+- spin_unlock_irqrestore(&div->lock, flags);
+ return 0;
+ }
+
+@@ -214,7 +191,7 @@ static struct clk_hw *
+ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+ {
+- unsigned long flags, cflags = list->div_flags;
++ unsigned long cflags = list->div_flags;
+ struct device *dev = ctx->dev;
+ struct lgm_clk_divider *div;
+ struct clk_init_data init = {};
+@@ -237,7 +214,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ init.num_parents = 1;
+
+ div->membase = ctx->membase;
+- div->lock = ctx->lock;
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+@@ -252,11 +228,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ if (ret)
+ return ERR_PTR(ret);
+
+- if (cflags & CLOCK_FLAG_VAL_INIT) {
+- spin_lock_irqsave(&div->lock, flags);
++ if (cflags & CLOCK_FLAG_VAL_INIT)
+ lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
+- spin_unlock_irqrestore(&div->lock, flags);
+- }
+
+ return hw;
+ }
+@@ -265,7 +238,6 @@ static struct clk_hw *
+ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+ {
+- unsigned long flags;
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
+@@ -274,12 +246,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+- if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+- spin_lock_irqsave(&ctx->lock, flags);
++ if (list->div_flags & CLOCK_FLAG_VAL_INIT)
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+- spin_unlock_irqrestore(&ctx->lock, flags);
+- }
+
+ return hw;
+ }
+@@ -287,13 +256,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ static int lgm_clk_gate_enable(struct clk_hw *hw)
+ {
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+- unsigned long flags;
+ unsigned int reg;
+
+- spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_EN(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+- spin_unlock_irqrestore(&gate->lock, flags);
+
+ return 0;
+ }
+@@ -301,25 +267,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw)
+ static void lgm_clk_gate_disable(struct clk_hw *hw)
+ {
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+- unsigned long flags;
+ unsigned int reg;
+
+- spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_DIS(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+- spin_unlock_irqrestore(&gate->lock, flags);
+ }
+
+ static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
+ {
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned int reg, ret;
+- unsigned long flags;
+
+- spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_STAT(gate->reg);
+ ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
+- spin_unlock_irqrestore(&gate->lock, flags);
+
+ return ret;
+ }
+@@ -334,7 +294,7 @@ static struct clk_hw *
+ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+ {
+- unsigned long flags, cflags = list->gate_flags;
++ unsigned long cflags = list->gate_flags;
+ const char *pname = list->parent_data[0].name;
+ struct device *dev = ctx->dev;
+ u8 shift = list->gate_shift;
+@@ -355,7 +315,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ init.num_parents = pname ? 1 : 0;
+
+ gate->membase = ctx->membase;
+- gate->lock = ctx->lock;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = cflags;
+@@ -367,9 +326,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+- spin_lock_irqsave(&gate->lock, flags);
+ lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
+- spin_unlock_irqrestore(&gate->lock, flags);
+ }
+
+ return hw;
+@@ -444,24 +401,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ static int lgm_clk_ddiv_enable(struct clk_hw *hw)
+ {
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+- unsigned long flags;
+
+- spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 1);
+- spin_unlock_irqrestore(&ddiv->lock, flags);
+ return 0;
+ }
+
+ static void lgm_clk_ddiv_disable(struct clk_hw *hw)
+ {
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+- unsigned long flags;
+
+- spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 0);
+- spin_unlock_irqrestore(&ddiv->lock, flags);
+ }
+
+ static int
+@@ -498,32 +449,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+- unsigned long flags;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
+- spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ div = div * 2;
+ }
+
+- if (div <= 0) {
+- spin_unlock_irqrestore(&ddiv->lock, flags);
++ if (div <= 0)
+ return -EINVAL;
+- }
+
+- if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
+- spin_unlock_irqrestore(&ddiv->lock, flags);
++ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
+ return -EINVAL;
+- }
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
+ ddiv1 - 1);
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
+ ddiv2 - 1);
+- spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return 0;
+ }
+@@ -534,18 +478,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+- unsigned long flags;
+ u64 rate64;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+
+ /* if predivide bit is enabled, modify div by factor of 2.5 */
+- spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = div * 2;
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ }
+- spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ if (div <= 0)
+ return *prate;
+@@ -559,12 +500,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ do_div(rate64, ddiv2);
+
+ /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+- spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ rate64 = rate64 * 2;
+ rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
+ }
+- spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return rate64;
+ }
+@@ -601,7 +540,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ init.num_parents = 1;
+
+ ddiv->membase = ctx->membase;
+- ddiv->lock = ctx->lock;
+ ddiv->reg = list->reg;
+ ddiv->shift0 = list->shift0;
+ ddiv->width0 = list->width0;
+diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
+index dbcb664687975..0aa0f35d63a0b 100644
+--- a/drivers/clk/x86/clk-cgu.h
++++ b/drivers/clk/x86/clk-cgu.h
+@@ -18,7 +18,6 @@ struct lgm_clk_mux {
+ u8 shift;
+ u8 width;
+ unsigned long flags;
+- spinlock_t lock;
+ };
+
+ struct lgm_clk_divider {
+@@ -31,7 +30,6 @@ struct lgm_clk_divider {
+ u8 width_gate;
+ unsigned long flags;
+ const struct clk_div_table *table;
+- spinlock_t lock;
+ };
+
+ struct lgm_clk_ddiv {
+@@ -49,7 +47,6 @@ struct lgm_clk_ddiv {
+ unsigned int mult;
+ unsigned int div;
+ unsigned long flags;
+- spinlock_t lock;
+ };
+
+ struct lgm_clk_gate {
+@@ -58,7 +55,6 @@ struct lgm_clk_gate {
+ unsigned int reg;
+ u8 shift;
+ unsigned long flags;
+- spinlock_t lock;
+ };
+
+ enum lgm_clk_type {
+@@ -82,7 +78,6 @@ struct lgm_clk_provider {
+ struct device_node *np;
+ struct device *dev;
+ struct clk_hw_onecell_data clk_data;
+- spinlock_t lock;
+ };
+
+ enum pll_type {
+@@ -97,7 +92,6 @@ struct lgm_clk_pll {
+ unsigned int reg;
+ unsigned long flags;
+ enum pll_type type;
+- spinlock_t lock;
+ };
+
+ /**
+diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
+index 4fa2bcaf71c89..e312af42e97ae 100644
+--- a/drivers/clk/x86/clk-lgm.c
++++ b/drivers/clk/x86/clk-lgm.c
+@@ -444,7 +444,6 @@ static int lgm_cgu_probe(struct platform_device *pdev)
+
+ ctx->np = np;
+ ctx->dev = dev;
+- spin_lock_init(&ctx->lock);
+
+ ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
+ ARRAY_SIZE(lgm_pll_clks));
+--
+2.39.0
+
--- /dev/null
+From becba302081bc75e545878e95436550ea1d8dd58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 14:48:30 +0800
+Subject: clk: mxl: Switch from direct readl/writel based IO to regmap based IO
+
+From: Rahul Tanwar <rtanwar@maxlinear.com>
+
+[ Upstream commit 036177310bac5534de44ff6a7b60a4d2c0b6567c ]
+
+Earlier versions of the driver accessed the ioremapped registers
+directly using readl/writel. But we need secure boot access, which is
+only possible when registers are read & written through regmap, because
+the security bus/hook is written & coupled only with the regmap layer.
+
+Switch the driver from direct readl/writel based register accesses
+to regmap based register accesses.
+
+Additionally, update the license headers to latest status.
+
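+After the conversion, the register helpers boil down to regmap calls
+(sketch; see the diff below):
+
+  static inline void lgm_set_clk_val(struct regmap *membase, u32 reg,
+                                     u8 shift, u8 width, u32 set_val)
+  {
+          u32 mask = GENMASK(width - 1, 0) << shift;
+
+          regmap_update_bits(membase, reg, mask, set_val << shift);
+  }
+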
+Reviewed-by: Yi xin Zhu <yzhu@maxlinear.com>
+Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
+Link: https://lore.kernel.org/r/2610331918206e0e3bd18babb39393a558fb34f9.1665642720.git.rtanwar@maxlinear.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Stable-dep-of: 106ef3bda210 ("clk: mxl: Fix a clk entry by adding relevant flags")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/x86/Kconfig | 5 +++--
+ drivers/clk/x86/clk-cgu-pll.c | 10 +++++----
+ drivers/clk/x86/clk-cgu.c | 5 +++--
+ drivers/clk/x86/clk-cgu.h | 38 +++++++++++++++++++----------------
+ drivers/clk/x86/clk-lgm.c | 13 ++++++++----
+ 5 files changed, 42 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig
+index 69642e15fcc1f..ced99e082e3dd 100644
+--- a/drivers/clk/x86/Kconfig
++++ b/drivers/clk/x86/Kconfig
+@@ -1,8 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config CLK_LGM_CGU
+ depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
++ select MFD_SYSCON
+ select OF_EARLY_FLATTREE
+ bool "Clock driver for Lightning Mountain(LGM) platform"
+ help
+- Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
+- network processor SoC.
++ Clock Generation Unit(CGU) driver for MaxLinear's x86 based
++ Lightning Mountain(LGM) network processor SoC.
+diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
+index 3179557b5f784..c83083affe88e 100644
+--- a/drivers/clk/x86/clk-cgu-pll.c
++++ b/drivers/clk/x86/clk-cgu-pll.c
+@@ -1,8 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+ * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+ */
+
+ #include <linux/clk-provider.h>
+@@ -76,8 +77,9 @@ static int lgm_pll_enable(struct clk_hw *hw)
+
+ spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
+- ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
+- val, (val & 0x1), 1, 100);
++ ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
++ val, (val & 0x1), 1, 100);
++
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
+index 33de600e0c38e..f5f30a18f4869 100644
+--- a/drivers/clk/x86/clk-cgu.c
++++ b/drivers/clk/x86/clk-cgu.c
+@@ -1,8 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+ * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+ */
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
+index 4e22bfb223128..dbcb664687975 100644
+--- a/drivers/clk/x86/clk-cgu.h
++++ b/drivers/clk/x86/clk-cgu.h
+@@ -1,18 +1,19 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+- * Copyright(c) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
++ * Copyright (C) 2020 Intel Corporation.
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+ */
+
+ #ifndef __CLK_CGU_H
+ #define __CLK_CGU_H
+
+-#include <linux/io.h>
++#include <linux/regmap.h>
+
+ struct lgm_clk_mux {
+ struct clk_hw hw;
+- void __iomem *membase;
++ struct regmap *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+@@ -22,7 +23,7 @@ struct lgm_clk_mux {
+
+ struct lgm_clk_divider {
+ struct clk_hw hw;
+- void __iomem *membase;
++ struct regmap *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+@@ -35,7 +36,7 @@ struct lgm_clk_divider {
+
+ struct lgm_clk_ddiv {
+ struct clk_hw hw;
+- void __iomem *membase;
++ struct regmap *membase;
+ unsigned int reg;
+ u8 shift0;
+ u8 width0;
+@@ -53,7 +54,7 @@ struct lgm_clk_ddiv {
+
+ struct lgm_clk_gate {
+ struct clk_hw hw;
+- void __iomem *membase;
++ struct regmap *membase;
+ unsigned int reg;
+ u8 shift;
+ unsigned long flags;
+@@ -77,7 +78,7 @@ enum lgm_clk_type {
+ * @clk_data: array of hw clocks and clk number.
+ */
+ struct lgm_clk_provider {
+- void __iomem *membase;
++ struct regmap *membase;
+ struct device_node *np;
+ struct device *dev;
+ struct clk_hw_onecell_data clk_data;
+@@ -92,7 +93,7 @@ enum pll_type {
+
+ struct lgm_clk_pll {
+ struct clk_hw hw;
+- void __iomem *membase;
++ struct regmap *membase;
+ unsigned int reg;
+ unsigned long flags;
+ enum pll_type type;
+@@ -300,29 +301,32 @@ struct lgm_clk_branch {
+ .div = _d, \
+ }
+
+-static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
++static inline void lgm_set_clk_val(struct regmap *membase, u32 reg,
+ u8 shift, u8 width, u32 set_val)
+ {
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+- u32 regval;
+
+- regval = readl(membase + reg);
+- regval = (regval & ~mask) | ((set_val << shift) & mask);
+- writel(regval, membase + reg);
++ regmap_update_bits(membase, reg, mask, set_val << shift);
+ }
+
+-static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
++static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg,
+ u8 shift, u8 width)
+ {
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+ u32 val;
+
+- val = readl(membase + reg);
++ if (regmap_read(membase, reg, &val)) {
++ WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg);
++ return 0;
++ }
++
+ val = (val & mask) >> shift;
+
+ return val;
+ }
+
++
++
+ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list,
+ unsigned int nr_clk);
+diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
+index 020f4e83a5ccb..4fa2bcaf71c89 100644
+--- a/drivers/clk/x86/clk-lgm.c
++++ b/drivers/clk/x86/clk-lgm.c
+@@ -1,10 +1,12 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+ * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+ */
+ #include <linux/clk-provider.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <dt-bindings/clock/intel,lgm-clk.h>
+@@ -433,9 +435,12 @@ static int lgm_cgu_probe(struct platform_device *pdev)
+
+ ctx->clk_data.num = CLK_NR_CLKS;
+
+- ctx->membase = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(ctx->membase))
++ ctx->membase = syscon_node_to_regmap(np);
++ if (IS_ERR_OR_NULL(ctx->membase)) {
++ dev_err(dev, "Failed to get clk CGU iomem\n");
+ return PTR_ERR(ctx->membase);
++ }
++
+
+ ctx->np = np;
+ ctx->dev = dev;
+--
+2.39.0
+
--- /dev/null
+From cb2c935ac9f9253467687035d298734ad8948a45 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Oct 2022 19:03:57 +0800
+Subject: clk: mxl: syscon_node_to_regmap() returns error pointers
+
+From: Rahul Tanwar <rtanwar@maxlinear.com>
+
+[ Upstream commit 7256d1f4618b40792d1e9b9b6cb1406a13cad2dd ]
+
+Commit 036177310bac ("clk: mxl: Switch from direct readl/writel based IO
+to regmap based IO") introduced code resulting in the below warning
+issued by the smatch static checker.
+
+ drivers/clk/x86/clk-lgm.c:441 lgm_cgu_probe() warn: passing zero to 'PTR_ERR'
+
+Fix the warning by replacing incorrect IS_ERR_OR_NULL() with IS_ERR().
+
+Fixes: 036177310bac ("clk: mxl: Switch from direct readl/writel based IO to regmap based IO")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Rahul Tanwar <rtanwar@maxlinear.com>
+Link: https://lore.kernel.org/r/49e339d4739e4ae4c92b00c1b2918af0755d4122.1666695221.git.rtanwar@maxlinear.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/x86/clk-lgm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
+index 4de77b2c750d3..f69455dd1c980 100644
+--- a/drivers/clk/x86/clk-lgm.c
++++ b/drivers/clk/x86/clk-lgm.c
+@@ -436,7 +436,7 @@ static int lgm_cgu_probe(struct platform_device *pdev)
+ ctx->clk_data.num = CLK_NR_CLKS;
+
+ ctx->membase = syscon_node_to_regmap(np);
+- if (IS_ERR_OR_NULL(ctx->membase)) {
++ if (IS_ERR(ctx->membase)) {
+ dev_err(dev, "Failed to get clk CGU iomem\n");
+ return PTR_ERR(ctx->membase);
+ }
+--
+2.39.0
+
--- /dev/null
+From 8f11c947ca5ae70783d372b83766e4bb375b810f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Nov 2022 16:41:34 +0800
+Subject: docs: perf: Fix PMU instance name of hisi-pcie-pmu
+
+From: Yicong Yang <yangyicong@hisilicon.com>
+
+[ Upstream commit eb79f12b4c41dd2403a0d16772ee72fcd6416015 ]
+
+The PMU instance will be called hisi_pcie<sicl>_core<core> rather than
+hisi_pcie<sicl>_<core>. Fix this in the documentation.
+
+Fixes: c8602008e247 ("docs: perf: Add description for HiSilicon PCIe PMU driver")
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
+Link: https://lore.kernel.org/r/20221117084136.53572-3-yangyicong@huawei.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../admin-guide/perf/hisi-pcie-pmu.rst | 22 +++++++++----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
+index 294ebbdb22af8..bbe66480ff851 100644
+--- a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
++++ b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
+@@ -15,10 +15,10 @@ HiSilicon PCIe PMU driver
+ The PCIe PMU driver registers a perf PMU with the name of its sicl-id and PCIe
+ Core id.::
+
+- /sys/bus/event_source/hisi_pcie<sicl>_<core>
++ /sys/bus/event_source/hisi_pcie<sicl>_core<core>
+
+ PMU driver provides description of available events and filter options in sysfs,
+-see /sys/bus/event_source/devices/hisi_pcie<sicl>_<core>.
++see /sys/bus/event_source/devices/hisi_pcie<sicl>_core<core>.
+
+ The "format" directory describes all formats of the config (events) and config1
+ (filter options) fields of the perf_event_attr structure. The "events" directory
+@@ -33,13 +33,13 @@ monitored by PMU.
+ Example usage of perf::
+
+ $# perf list
+- hisi_pcie0_0/rx_mwr_latency/ [kernel PMU event]
+- hisi_pcie0_0/rx_mwr_cnt/ [kernel PMU event]
++ hisi_pcie0_core0/rx_mwr_latency/ [kernel PMU event]
++ hisi_pcie0_core0/rx_mwr_cnt/ [kernel PMU event]
+ ------------------------------------------
+
+- $# perf stat -e hisi_pcie0_0/rx_mwr_latency/
+- $# perf stat -e hisi_pcie0_0/rx_mwr_cnt/
+- $# perf stat -g -e hisi_pcie0_0/rx_mwr_latency/ -e hisi_pcie0_0/rx_mwr_cnt/
++ $# perf stat -e hisi_pcie0_core0/rx_mwr_latency/
++ $# perf stat -e hisi_pcie0_core0/rx_mwr_cnt/
++ $# perf stat -g -e hisi_pcie0_core0/rx_mwr_latency/ -e hisi_pcie0_core0/rx_mwr_cnt/
+
+ The current driver does not support sampling. So "perf record" is unsupported.
+ Also attach to a task is unsupported for PCIe PMU.
+@@ -64,7 +64,7 @@ bit8 is set, port=0x100; if these two Root Ports are both monitored, port=0x101.
+
+ Example usage of perf::
+
+- $# perf stat -e hisi_pcie0_0/rx_mwr_latency,port=0x1/ sleep 5
++ $# perf stat -e hisi_pcie0_core0/rx_mwr_latency,port=0x1/ sleep 5
+
+ -bdf
+
+@@ -76,7 +76,7 @@ For example, "bdf=0x3900" means BDF of target Endpoint is 0000:39:00.0.
+
+ Example usage of perf::
+
+- $# perf stat -e hisi_pcie0_0/rx_mrd_flux,bdf=0x3900/ sleep 5
++ $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,bdf=0x3900/ sleep 5
+
+ 2. Trigger filter
+ Event statistics start when the first time TLP length is greater/smaller
+@@ -90,7 +90,7 @@ means start when TLP length < condition.
+
+ Example usage of perf::
+
+- $# perf stat -e hisi_pcie0_0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
++ $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
+
+ 3. Threshold filter
+ Counter counts when TLP length within the specified range. You can set the
+@@ -103,4 +103,4 @@ when TLP length < threshold.
+
+ Example usage of perf::
+
+- $# perf stat -e hisi_pcie0_0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
++ $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
+--
+2.39.0
+
--- /dev/null
+From 2a378018749c5e96afc0551454826afc6e7fc9a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 15:35:48 +0530
+Subject: drm/edid: Fix minimum bpc supported with DSC1.2 for HDMI sink
+
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+
+[ Upstream commit 18feaf6d0784dcba888859109676adf1e0260dfd ]
+
+HF-VSDB/SCDB has bits to advertise support for 16, 12 and 10 bpc.
+If none of the bits are set, the minimum bpc supported with DSC is 8.
+
+This patch corrects the min bpc supported to be 8, instead of 0.
+
+Fixes: 76ee7b905678 ("drm/edid: Parse DSC1.2 cap fields from HFVSDB block")
+Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Cc: Uma Shankar <uma.shankar@intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+
+v2: s/DSC1.2/DSC 1.2
+
+Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220916100551.2531750-2-ankit.k.nautiyal@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_edid.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b36abfa915813..9d82de4c0a8b0 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5827,7 +5827,8 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
+ else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
+ hdmi_dsc->bpc_supported = 10;
+ else
+- hdmi_dsc->bpc_supported = 0;
++ /* Supports min 8 BPC if DSC 1.2 is supported*/
++ hdmi_dsc->bpc_supported = 8;
+
+ dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+--
+2.39.0
+
--- /dev/null
+From b83a0e662cd493b1e52d334f37ae04d5ac5447e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 12:40:31 +0200
+Subject: drm/etnaviv: don't truncate physical page address
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+[ Upstream commit d37c120b73128690434cc093952439eef9d56af1 ]
+
+While the interface for the MMU mapping takes a phys_addr_t to hold a
+full 64 bit address when necessary and MMUv2 is able to map physical
+addresses of up to 40 bits, etnaviv_iommu_map() truncates the address
+to 32 bits. Fix this by using the correct type.
+
+Fixes: 931e97f3afd8 ("drm/etnaviv: mmuv2: support 40 bit phys address")
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 55479cb8b1ac3..67bdce5326c6e 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ return -EINVAL;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+- u32 pa = sg_dma_address(sg) - sg->offset;
++ phys_addr_t pa = sg_dma_address(sg) - sg->offset;
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+
+- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
++ VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
+ if (ret)
+--
+2.39.0
+
--- /dev/null
+From 236e0b78bcc1bb755fcc75586d8d266af822a41e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Dec 2022 11:49:44 -0800
+Subject: drm/i915: Remove __maybe_unused from mtl_info
+
+From: Lucas De Marchi <lucas.demarchi@intel.com>
+
+[ Upstream commit fff758698842fb6722be37498d8773e0fb47f000 ]
+
+The __maybe_unused attribute should remain only while the respective
+info is not yet in pciidlist. The info can't be added together with its
+definition because that would cause the driver to automatically probe
+for the device while it's still not ready for that. However, once
+pciidlist contains it, the attribute can be removed.
+
+Fixes: 7835303982d1 ("drm/i915/mtl: Add MeteorLake PCI IDs")
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Reviewed-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221214194944.3670344-1-lucas.demarchi@intel.com
+(cherry picked from commit 50490ce05b7a50b0bd4108fa7d6db3ca2972fa83)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_pci.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index 34f2d9da201e2..fe4f279aaeb3e 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -1130,7 +1130,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ {}
+ };
+
+-__maybe_unused
+ static const struct intel_device_info mtl_info = {
+ XE_HP_FEATURES,
+ XE_LPDP_FEATURES,
+--
+2.39.0
+
--- /dev/null
+From b6f81558a7e02a37fd7970f1be08108d3e8f837a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Sep 2022 23:40:31 +0000
+Subject: KVM: SVM: Skip WRMSR fastpath on VM-Exit if next RIP isn't valid
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 5c30e8101e8d5d020b1d7119117889756a6ed713 ]
+
+Skip the WRMSR fastpath in SVM's VM-Exit handler if the next RIP isn't
+valid, e.g. because KVM is running with nrips=false. SVM must decode and
+emulate to skip the WRMSR if the CPU doesn't provide the next RIP.
+Getting the instruction bytes to decode the WRMSR requires reading guest
+memory, which in turn means dereferencing memslots, and that isn't safe
+because KVM doesn't hold SRCU when the fastpath runs.
+
+Don't bother trying to enable the fastpath for this case, e.g. by doing
+only the WRMSR and leaving the "skip" until later. NRIPS is supported on
+all modern CPUs (KVM has considered making it mandatory), and the next
+RIP will be valid the vast, vast majority of the time.
+
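+The fastpath guard then looks like (sketch; see the diff below):
+
+  if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
+      nrips && control->next_rip)
+          return handle_fastpath_set_msr_irqoff(vcpu);
+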
+ =============================
+ WARNING: suspicious RCU usage
+ 6.0.0-smp--4e557fcd3d80-skip #13 Tainted: G O
+ -----------------------------
+ include/linux/kvm_host.h:954 suspicious rcu_dereference_check() usage!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 2, debug_locks = 1
+ 1 lock held by stable/206475:
+ #0: ffff9d9dfebcc0f0 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x8b/0x620 [kvm]
+
+ stack backtrace:
+ CPU: 152 PID: 206475 Comm: stable Tainted: G O 6.0.0-smp--4e557fcd3d80-skip #13
+ Hardware name: Google, Inc. Arcadia_IT_80/Arcadia_IT_80, BIOS 10.48.0 01/27/2022
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x69/0xaa
+ dump_stack+0x10/0x12
+ lockdep_rcu_suspicious+0x11e/0x130
+ kvm_vcpu_gfn_to_memslot+0x155/0x190 [kvm]
+ kvm_vcpu_gfn_to_hva_prot+0x18/0x80 [kvm]
+ paging64_walk_addr_generic+0x183/0x450 [kvm]
+ paging64_gva_to_gpa+0x63/0xd0 [kvm]
+ kvm_fetch_guest_virt+0x53/0xc0 [kvm]
+ __do_insn_fetch_bytes+0x18b/0x1c0 [kvm]
+ x86_decode_insn+0xf0/0xef0 [kvm]
+ x86_emulate_instruction+0xba/0x790 [kvm]
+ kvm_emulate_instruction+0x17/0x20 [kvm]
+ __svm_skip_emulated_instruction+0x85/0x100 [kvm_amd]
+ svm_skip_emulated_instruction+0x13/0x20 [kvm_amd]
+ handle_fastpath_set_msr_irqoff+0xae/0x180 [kvm]
+ svm_vcpu_run+0x4b8/0x5a0 [kvm_amd]
+ vcpu_enter_guest+0x16ca/0x22f0 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x39d/0x900 [kvm]
+ kvm_vcpu_ioctl+0x538/0x620 [kvm]
+ __se_sys_ioctl+0x77/0xc0
+ __x64_sys_ioctl+0x1d/0x20
+ do_syscall_64+0x3d/0x80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Fixes: 404d5d7bff0d ("KVM: X86: Introduce more exit_fastpath_completion enum values")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/20220930234031.1732249-1-seanjc@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/svm.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index ce362e88a5676..0434bb7b456bd 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3889,8 +3889,14 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+
+ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+- if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+- to_svm(vcpu)->vmcb->control.exit_info_1)
++ struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
++
++ /*
++ * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
++ * can't read guest memory (dereference memslots) to decode the WRMSR.
++ */
++ if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
++ nrips && control->next_rip)
+ return handle_fastpath_set_msr_irqoff(vcpu);
+
+ return EXIT_FASTPATH_NONE;
+--
+2.39.0
+
--- /dev/null
+From 2386e6d2c86b56016086d3b28012121ff0357548 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 14:36:20 -0700
+Subject: KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
+
+From: Jim Mattson <jmattson@google.com>
+
+[ Upstream commit 2e7eab81425ad6c875f2ed47c0ce01e78afc38a5 ]
+
+According to Intel's document on Indirect Branch Restricted
+Speculation, "Enabling IBRS does not prevent software from controlling
+the predicted targets of indirect branches of unrelated software
+executed later at the same predictor mode (for example, between two
+different user applications, or two different virtual machines). Such
+isolation can be ensured through use of the Indirect Branch Predictor
+Barrier (IBPB) command." This applies to both basic and enhanced IBRS.
+
+Since L1 and L2 VMs share hardware predictor modes (guest-user and
+guest-kernel), hardware IBRS is not sufficient to virtualize
+IBRS. (The way that basic IBRS is implemented on pre-eIBRS parts,
+hardware IBRS is actually sufficient in practice, even though it isn't
+sufficient architecturally.)
+
+For virtual CPUs that support IBRS, add an indirect branch prediction
+barrier on emulated VM-exit, to ensure that the predicted targets of
+indirect branches executed in L1 cannot be controlled by software that
+was executed in L2.
+
+Since we typically don't intercept guest writes to IA32_SPEC_CTRL,
+perform the IBPB at emulated VM-exit regardless of the current
+IA32_SPEC_CTRL.IBRS value, even though the IBPB could technically be
+deferred until L1 sets IA32_SPEC_CTRL.IBRS, if IA32_SPEC_CTRL.IBRS is
+clear at emulated VM-exit.
+
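+On emulated VM-exit this amounts to (sketch; see the diff below):
+
+  /* in nested_vmx_vmexit(), after switching back to vmcs01 */
+  if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+          indirect_branch_prediction_barrier();
+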
+This is CVE-2022-2196.
+
+Fixes: 5c911beff20a ("KVM: nVMX: Skip IBPB when switching between vmcs01 and vmcs02")
+Cc: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/20221019213620.1953281-3-jmattson@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/vmx/nested.c | 11 +++++++++++
+ arch/x86/kvm/vmx/vmx.c | 6 ++++--
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 10c63b1bf92fa..df8995977ec2d 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4767,6 +4767,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+
++ /*
++ * If IBRS is advertised to the vCPU, KVM must flush the indirect
++ * branch predictors when transitioning from L2 to L1, as L1 expects
++ * hardware (KVM in this case) to provide separate predictor modes.
++ * Bare metal isolates VMX root (host) from VMX non-root (guest), but
++ * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
++ * separate modes for L2 vs L1.
++ */
++ if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++ indirect_branch_prediction_barrier();
++
+ /* Update any VMCS fields that might have changed while L2 ran */
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4ae248e87f5ed..95ed874fbbcc3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+
+ /*
+ * No indirect branch prediction barrier needed when switching
+- * the active VMCS within a guest, e.g. on nested VM-Enter.
+- * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
++ * the active VMCS within a vCPU, unless IBRS is advertised to
++ * the vCPU. To minimize the number of IBPBs executed, KVM
++ * performs IBPB on nested VM-Exit (a single nested transition
++ * may switch the active VMCS multiple times).
+ */
+ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ indirect_branch_prediction_barrier();
+--
+2.39.0
+
--- /dev/null
+From e0e35a0935df846f249ee7cbb957313190dfe71f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Sep 2022 23:36:32 +0000
+Subject: KVM: x86: Fail emulation during EMULTYPE_SKIP on any exception
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 17122c06b86c9f77f45b86b8e62c3ed440847a59 ]
+
+Treat any exception during instruction decode for EMULTYPE_SKIP as a
+"full" emulation failure, i.e. signal failure instead of queuing the
+exception. When decoding purely to skip an instruction, KVM and/or the
+CPU has already done some amount of emulation that cannot be unwound,
+e.g. on an EPT misconfig VM-Exit KVM has already processed the emulated
+MMIO. KVM already does this if a #UD is encountered, but not for other
+exceptions, e.g. if a #PF is encountered during fetch.
+
+In SVM's soft-injection use case, queueing the exception is particularly
+problematic as queueing exceptions while injecting events can put KVM
+into an infinite loop due to bailing from VM-Enter to service the newly
+pending exception. E.g. multiple warnings to detect such behavior fire:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 3 PID: 1017 at arch/x86/kvm/x86.c:9873 kvm_arch_vcpu_ioctl_run+0x1de5/0x20a0 [kvm]
+ Modules linked in: kvm_amd ccp kvm irqbypass
+ CPU: 3 PID: 1017 Comm: svm_nested_soft Not tainted 6.0.0-rc1+ #220
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+ RIP: 0010:kvm_arch_vcpu_ioctl_run+0x1de5/0x20a0 [kvm]
+ Call Trace:
+ kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
+ __x64_sys_ioctl+0x85/0xc0
+ do_syscall_64+0x2b/0x50
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+ ---[ end trace 0000000000000000 ]---
+ ------------[ cut here ]------------
+ WARNING: CPU: 3 PID: 1017 at arch/x86/kvm/x86.c:9987 kvm_arch_vcpu_ioctl_run+0x12a3/0x20a0 [kvm]
+ Modules linked in: kvm_amd ccp kvm irqbypass
+ CPU: 3 PID: 1017 Comm: svm_nested_soft Tainted: G W 6.0.0-rc1+ #220
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+ RIP: 0010:kvm_arch_vcpu_ioctl_run+0x12a3/0x20a0 [kvm]
+ Call Trace:
+ kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
+ __x64_sys_ioctl+0x85/0xc0
+ do_syscall_64+0x2b/0x50
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+ ---[ end trace 0000000000000000 ]---
+
+Fixes: 6ea6e84309ca ("KVM: x86: inject exceptions produced by x86_decode_insn")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Link: https://lore.kernel.org/r/20220930233632.1725475-1-seanjc@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 05ca303d7fd98..68827b8dc37a5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8860,7 +8860,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ write_fault_to_spt,
+ emulation_type))
+ return 1;
+- if (ctxt->have_exception) {
++
++ if (ctxt->have_exception &&
++ !(emulation_type & EMULTYPE_SKIP)) {
+ /*
+ * #UD should result in just EMULATION_FAILED, and trap-like
+ * exception should not be encountered during decode.
+--
+2.39.0
+
--- /dev/null
+From f39621f148d4f59f362703d41af27c0a1c2ee37d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Dec 2022 05:33:41 -0500
+Subject: KVM: x86: fix deadlock for KVM_XEN_EVTCHN_RESET
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit a79b53aaaab53de017517bf9579b6106397a523c ]
+
+While KVM_XEN_EVTCHN_RESET is usually called with no vCPUs running,
+invoking it while vCPUs are running could cause a deadlock. This is due to
+kvm_xen_eventfd_reset() doing a synchronize_srcu() inside
+a kvm->lock critical section.
+
+To avoid this, first collect all the evtchnfd objects in an
+array and free all of them once the kvm->lock critical section
+is over and the SRCU grace period has expired.
+
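+The shape of the fix, as a minimal userspace sketch (illustrative only;
+the names and the reader-synchronization stand-in are not the KVM API):
+
+  /* deferred_free.c - unpublish entries under the lock, then wait for
+   * readers and free them only after the lock has been dropped. */
+  #include <pthread.h>
+  #include <stdlib.h>
+
+  #define NOBJ 4
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static int *objects[NOBJ];             /* registry, protected by 'lock' */
+
+  static void wait_for_readers(void) { } /* stand-in for synchronize_srcu() */
+
+  static void reset_all(void)
+  {
+      int *to_free[NOBJ];
+      int n = 0;
+
+      pthread_mutex_lock(&lock);
+      for (int i = 0; i < NOBJ; i++) {
+          if (!objects[i])
+              continue;
+          to_free[n++] = objects[i];     /* collect while locked */
+          objects[i] = NULL;             /* unpublish from the registry */
+      }
+      pthread_mutex_unlock(&lock);
+
+      wait_for_readers();                /* must not happen under the lock */
+
+      while (n--)                        /* free outside the critical section */
+          free(to_free[n]);
+  }
+
+  int main(void)
+  {
+      for (int i = 0; i < NOBJ; i++)
+          objects[i] = malloc(sizeof(int));
+      reset_all();
+      return 0;
+  }
+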
+Reported-by: Michal Luczaj <mhal@rbox.co>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/xen.c | 30 +++++++++++++++++--
+ .../selftests/kvm/x86_64/xen_shinfo_test.c | 6 ++++
+ 2 files changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index f3098c0e386a8..a58a426e6b1c0 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1757,18 +1757,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
+
+ static int kvm_xen_eventfd_reset(struct kvm *kvm)
+ {
+- struct evtchnfd *evtchnfd;
++ struct evtchnfd *evtchnfd, **all_evtchnfds;
+ int i;
++ int n = 0;
+
+ mutex_lock(&kvm->lock);
++
++ /*
++ * Because synchronize_srcu() cannot be called inside the
++ * critical section, first collect all the evtchnfd objects
++ * in an array as they are removed from evtchn_ports.
++ */
++ idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
++ n++;
++
++ all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
++ if (!all_evtchnfds) {
++ mutex_unlock(&kvm->lock);
++ return -ENOMEM;
++ }
++
++ n = 0;
+ idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
++ all_evtchnfds[n++] = evtchnfd;
+ idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
+- synchronize_srcu(&kvm->srcu);
++ }
++ mutex_unlock(&kvm->lock);
++
++ synchronize_srcu(&kvm->srcu);
++
++ while (n--) {
++ evtchnfd = all_evtchnfds[n];
+ if (!evtchnfd->deliver.port.port)
+ eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
+ kfree(evtchnfd);
+ }
+- mutex_unlock(&kvm->lock);
++ kfree(all_evtchnfds);
+
+ return 0;
+ }
+diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+index 2a5727188c8d3..8383457e66990 100644
+--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
++++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+@@ -942,6 +942,12 @@ int main(int argc, char *argv[])
+ }
+
+ done:
++ struct kvm_xen_hvm_attr evt_reset = {
++ .type = KVM_XEN_ATTR_TYPE_EVTCHN,
++ .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
++ };
++ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
++
+ alarm(0);
+ clock_gettime(CLOCK_REALTIME, &max_ts);
+
+--
+2.39.0
+
--- /dev/null
+From 943f7963ca9c3798552e70c6e1e5d69085a8b2fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 02:47:19 +0100
+Subject: netfilter: conntrack: fix bug in for_each_sctp_chunk
+
+From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+
+[ Upstream commit 98ee0077452527f971567db01386de3c3d97ce13 ]
+
+skb_header_pointer() will return NULL if offset + sizeof(_sch) exceeds
+skb->len, so this offset < skb->len test is redundant.
+
+If sch->length == 0, this will end up in an infinite loop; add a check
+for sch->length > 0.
+
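+A tiny userspace sketch of why a zero chunk length would stall the walk
+(the offset and header size below are illustrative values):
+
+  /* chunk_step.c - the iterator advances by the 4-byte-padded chunk
+   * length, so a length of 0 never moves the offset forward. */
+  #include <arpa/inet.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned short length = htons(0);   /* bogus zero-length chunk */
+      unsigned int offset = 12;           /* just past the SCTP header */
+
+      unsigned int step = (ntohs(length) + 3) & ~3;
+      printf("offset %u -> %u\n", offset, offset + step);  /* 12 -> 12 */
+      return 0;
+  }
+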
+Fixes: 9fb9cbb1082d ("[NETFILTER]: Add nf_conntrack subsystem.")
+Suggested-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_proto_sctp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 895e0ca542994..444f02271b36a 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -144,8 +144,8 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
+
+ #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
+ for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
+- (offset) < (skb)->len && \
+- ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
++ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
++ (sch)->length; \
+ (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
+
+ /* Some validity checks to make sure the chunks are fine */
+--
+2.39.0
+
--- /dev/null
+From fea69dce89b7e2c165a4983f617d7fb36eebe0ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Feb 2023 11:16:50 +0100
+Subject: nfp: ethtool: fix the bug of setting unsupported port speed
+
+From: Yu Xiao <yu.xiao@corigine.com>
+
+[ Upstream commit 821de68c1f9c0236b0b9c10834cda900ae9b443c ]
+
+An unsupported port speed could previously be configured, leading to
+errors. Fix this by returning an error when an unsupported speed is
+requested.
+
+This fix depends on the following, which was included in v6.2-rc1:
+commit a61474c41e8c ("nfp: ethtool: support reporting link modes").
+
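+A userspace sketch of the check being added (the table and bitmap mirror
+the driver's new speed map and per-port speed bitmap, but the program
+itself is illustrative only):
+
+  /* speed_check.c - map the requested ethtool speed onto a driver speed
+   * enum, then test the bitmap of speeds reported for the port. */
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum { SP_1G, SP_10G, SP_25G, SP_40G, SP_50G, SP_100G, SP_NUM };
+
+  static const unsigned int speed_map[SP_NUM] = {
+      [SP_1G] = 1000,   [SP_10G] = 10000, [SP_25G] = 25000,
+      [SP_40G] = 40000, [SP_50G] = 50000, [SP_100G] = 100000,
+  };
+
+  static bool speed_supported(unsigned long port_bitmap, unsigned int req)
+  {
+      for (int i = 0; i < SP_NUM; i++)
+          if (req == speed_map[i] && (port_bitmap & (1UL << i)))
+              return true;
+      return false;   /* the driver returns -EINVAL in this case */
+  }
+
+  int main(void)
+  {
+      unsigned long port = (1UL << SP_10G) | (1UL << SP_25G);
+
+      printf("25000: %d\n", speed_supported(port, 25000));  /* 1 */
+      printf("40000: %d\n", speed_supported(port, 40000));  /* 0 */
+      return 0;
+  }
+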
+Fixes: 7c698737270f ("nfp: add support for .set_link_ksettings()")
+Signed-off-by: Yu Xiao <yu.xiao@corigine.com>
+Signed-off-by: Simon Horman <simon.horman@corigine.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/netronome/nfp/nfp_net_ethtool.c | 194 ++++++++++++++----
+ drivers/net/ethernet/netronome/nfp/nfp_port.h | 12 ++
+ 2 files changed, 170 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index 377c3b1185ee0..af376b9000677 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
+ }
+ }
+
+-static const u16 nfp_eth_media_table[] = {
+- [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+- [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+- [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+- [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+- [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+- [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+- [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+- [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+- [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+- [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+- [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+- [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+- [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+- [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+- [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+- [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+- [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+- [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+- [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+- [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+- [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+- [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+- [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+- [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+- [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+- [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+- [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+- [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++static const struct nfp_eth_media_link_mode {
++ u16 ethtool_link_mode;
++ u16 speed;
++} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
++ [NFP_MEDIA_1000BASE_CX] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++ .speed = NFP_SPEED_1G,
++ },
++ [NFP_MEDIA_1000BASE_KX] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++ .speed = NFP_SPEED_1G,
++ },
++ [NFP_MEDIA_10GBASE_KX4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_10GBASE_KR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_10GBASE_CX4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_10GBASE_CR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_10GBASE_SR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_10GBASE_ER] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
++ .speed = NFP_SPEED_10G,
++ },
++ [NFP_MEDIA_25GBASE_KR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++ .speed = NFP_SPEED_25G,
++ },
++ [NFP_MEDIA_25GBASE_KR_S] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++ .speed = NFP_SPEED_25G,
++ },
++ [NFP_MEDIA_25GBASE_CR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++ .speed = NFP_SPEED_25G,
++ },
++ [NFP_MEDIA_25GBASE_CR_S] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++ .speed = NFP_SPEED_25G,
++ },
++ [NFP_MEDIA_25GBASE_SR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
++ .speed = NFP_SPEED_25G,
++ },
++ [NFP_MEDIA_40GBASE_CR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
++ .speed = NFP_SPEED_40G,
++ },
++ [NFP_MEDIA_40GBASE_KR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
++ .speed = NFP_SPEED_40G,
++ },
++ [NFP_MEDIA_40GBASE_SR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
++ .speed = NFP_SPEED_40G,
++ },
++ [NFP_MEDIA_40GBASE_LR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
++ .speed = NFP_SPEED_40G,
++ },
++ [NFP_MEDIA_50GBASE_KR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_50GBASE_SR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_50GBASE_CR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_50GBASE_LR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_50GBASE_ER] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_50GBASE_FR] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ .speed = NFP_SPEED_50G,
++ },
++ [NFP_MEDIA_100GBASE_KR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++ .speed = NFP_SPEED_100G,
++ },
++ [NFP_MEDIA_100GBASE_SR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
++ .speed = NFP_SPEED_100G,
++ },
++ [NFP_MEDIA_100GBASE_CR4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++ .speed = NFP_SPEED_100G,
++ },
++ [NFP_MEDIA_100GBASE_KP4] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++ .speed = NFP_SPEED_100G,
++ },
++ [NFP_MEDIA_100GBASE_CR10] = {
++ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++ .speed = NFP_SPEED_100G,
++ },
++};
++
++static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
++ [NFP_SPEED_1G] = SPEED_1000,
++ [NFP_SPEED_10G] = SPEED_10000,
++ [NFP_SPEED_25G] = SPEED_25000,
++ [NFP_SPEED_40G] = SPEED_40000,
++ [NFP_SPEED_50G] = SPEED_50000,
++ [NFP_SPEED_100G] = SPEED_100000,
+ };
+
+ static void nfp_add_media_link_mode(struct nfp_port *port,
+@@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
+ };
+ struct nfp_cpp *cpp = port->app->cpp;
+
+- if (nfp_eth_read_media(cpp, &ethm))
++ if (nfp_eth_read_media(cpp, &ethm)) {
++ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
+ return;
++ }
++
++ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
+
+ for (u32 i = 0; i < 2; i++) {
+ supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
+@@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
+
+ for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
+ if (i < 64) {
+- if (supported_modes[0] & BIT_ULL(i))
+- __set_bit(nfp_eth_media_table[i],
++ if (supported_modes[0] & BIT_ULL(i)) {
++ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.supported);
++ __set_bit(nfp_eth_media_table[i].speed,
++ port->speed_bitmap);
++ }
+
+ if (advertised_modes[0] & BIT_ULL(i))
+- __set_bit(nfp_eth_media_table[i],
++ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.advertising);
+ } else {
+- if (supported_modes[1] & BIT_ULL(i - 64))
+- __set_bit(nfp_eth_media_table[i],
++ if (supported_modes[1] & BIT_ULL(i - 64)) {
++ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.supported);
++ __set_bit(nfp_eth_media_table[i].speed,
++ port->speed_bitmap);
++ }
+
+ if (advertised_modes[1] & BIT_ULL(i - 64))
+- __set_bit(nfp_eth_media_table[i],
++ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.advertising);
+ }
+ }
+@@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
+
+ if (cmd->base.speed != SPEED_UNKNOWN) {
+ u32 speed = cmd->base.speed / eth_port->lanes;
++ bool is_supported = false;
++
++ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
++ if (cmd->base.speed == nfp_eth_speed_map[i] &&
++ test_bit(i, port->speed_bitmap)) {
++ is_supported = true;
++ break;
++ }
++ }
++
++ if (!is_supported) {
++ netdev_err(netdev, "Speed %u is not supported.\n",
++ cmd->base.speed);
++ err = -EINVAL;
++ goto err_bad_set;
++ }
+
+ if (req_aneg) {
+ netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
+index 6793cdf9ff115..c31812287ded1 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
+@@ -38,6 +38,16 @@ enum nfp_port_flags {
+ NFP_PORT_CHANGED = 0,
+ };
+
++enum {
++ NFP_SPEED_1G,
++ NFP_SPEED_10G,
++ NFP_SPEED_25G,
++ NFP_SPEED_40G,
++ NFP_SPEED_50G,
++ NFP_SPEED_100G,
++ NFP_SUP_SPEED_NUMBER
++};
++
+ /**
+ * struct nfp_port - structure representing NFP port
+ * @netdev: backpointer to associated netdev
+@@ -52,6 +62,7 @@ enum nfp_port_flags {
+ * @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
+ * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+ * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
++ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
+ * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
+ * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
+ * @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
+@@ -78,6 +89,7 @@ struct nfp_port {
+ bool eth_forced;
+ struct nfp_eth_table_port *eth_port;
+ u8 __iomem *eth_stats;
++ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
+ };
+ /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
+ struct {
+--
+2.39.0
+
--- /dev/null
+From c58cb0df650e2663d456fc20eac0095447e9cc17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Nov 2022 12:30:30 +0100
+Subject: nfp: ethtool: support reporting link modes
+
+From: Yu Xiao <yu.xiao@corigine.com>
+
+[ Upstream commit a61474c41e8c530c54a26db4f5434f050ef7718d ]
+
+Add support for reporting link modes,
+including `Supported link modes` and `Advertised link modes`,
+via ethtool $DEV.
+
+A new command `SPCODE_READ_MEDIA` is added to read this info from the
+management firmware, and the mapping table `nfp_eth_media_table` maps
+link modes between the NFP and the kernel. Together they provide this
+capability.
+
+Signed-off-by: Yu Xiao <yu.xiao@corigine.com>
+Reviewed-by: Louis Peens <louis.peens@corigine.com>
+Signed-off-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
+Link: https://lore.kernel.org/r/20221125113030.141642-1-simon.horman@corigine.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 821de68c1f9c ("nfp: ethtool: fix the bug of setting unsupported port speed")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/netronome/nfp/nfp_main.h | 1 +
+ .../ethernet/netronome/nfp/nfp_net_ethtool.c | 73 +++++++++++++++++++
+ .../ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 17 +++++
+ .../ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 56 ++++++++++++++
+ .../netronome/nfp/nfpcore/nfp_nsp_eth.c | 26 +++++++
+ 5 files changed, 173 insertions(+)
+
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+index afd3edfa24283..b9266cf72a172 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+@@ -28,6 +28,7 @@ struct nfp_hwinfo;
+ struct nfp_mip;
+ struct nfp_net;
+ struct nfp_nsp_identify;
++struct nfp_eth_media_buf;
+ struct nfp_port;
+ struct nfp_rtsym;
+ struct nfp_rtsym_table;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index 991059d6cb32e..377c3b1185ee0 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -293,6 +293,76 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
+ }
+ }
+
++static const u16 nfp_eth_media_table[] = {
++ [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++ [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++ [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++ [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
++ [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++ [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
++ [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
++ [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
++ [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++ [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++ [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++ [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++ [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
++ [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
++ [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
++ [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
++ [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
++ [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
++ [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
++ [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
++ [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++ [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++ [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
++ [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++ [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++ [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++};
++
++static void nfp_add_media_link_mode(struct nfp_port *port,
++ struct nfp_eth_table_port *eth_port,
++ struct ethtool_link_ksettings *cmd)
++{
++ u64 supported_modes[2], advertised_modes[2];
++ struct nfp_eth_media_buf ethm = {
++ .eth_index = eth_port->eth_index,
++ };
++ struct nfp_cpp *cpp = port->app->cpp;
++
++ if (nfp_eth_read_media(cpp, ðm))
++ return;
++
++ for (u32 i = 0; i < 2; i++) {
++ supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
++ advertised_modes[i] = le64_to_cpu(ethm.advertised_modes[i]);
++ }
++
++ for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
++ if (i < 64) {
++ if (supported_modes[0] & BIT_ULL(i))
++ __set_bit(nfp_eth_media_table[i],
++ cmd->link_modes.supported);
++
++ if (advertised_modes[0] & BIT_ULL(i))
++ __set_bit(nfp_eth_media_table[i],
++ cmd->link_modes.advertising);
++ } else {
++ if (supported_modes[1] & BIT_ULL(i - 64))
++ __set_bit(nfp_eth_media_table[i],
++ cmd->link_modes.supported);
++
++ if (advertised_modes[1] & BIT_ULL(i - 64))
++ __set_bit(nfp_eth_media_table[i],
++ cmd->link_modes.advertising);
++ }
++ }
++}
++
+ /**
+ * nfp_net_get_link_ksettings - Get Link Speed settings
+ * @netdev: network interface device structure
+@@ -311,6 +381,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
+ u16 sts;
+
+ /* Init to unknowns */
++ ethtool_link_ksettings_zero_link_mode(cmd, supported);
++ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+@@ -321,6 +393,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
+ if (eth_port) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
++ nfp_add_media_link_mode(port, eth_port, cmd);
+ if (eth_port->supp_aneg) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (eth_port->aneg == NFP_ANEG_AUTO) {
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+index 730fea214b8ab..7136bc48530ba 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+@@ -100,6 +100,7 @@ enum nfp_nsp_cmd {
+ SPCODE_FW_LOADED = 19, /* Is application firmware loaded */
+ SPCODE_VERSIONS = 21, /* Report FW versions */
+ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
++ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */
+ };
+
+ struct nfp_nsp_dma_buf {
+@@ -1100,4 +1101,20 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ kfree(buf);
+
+ return ret;
++};
++
++int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size)
++{
++ struct nfp_nsp_command_buf_arg media = {
++ {
++ .code = SPCODE_READ_MEDIA,
++ .option = size,
++ },
++ .in_buf = buf,
++ .in_size = size,
++ .out_buf = buf,
++ .out_size = size,
++ };
++
++ return nfp_nsp_command_buf(state, &media);
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+index 992d72ac98d38..8f5cab0032d08 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+@@ -65,6 +65,11 @@ static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+ return nfp_nsp_get_abi_ver_minor(state) > 28;
+ }
+
++static inline bool nfp_nsp_has_read_media(struct nfp_nsp *state)
++{
++ return nfp_nsp_get_abi_ver_minor(state) > 33;
++}
++
+ enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+@@ -97,6 +102,47 @@ enum nfp_eth_fec {
+ NFP_FEC_DISABLED_BIT,
+ };
+
++/* link modes about RJ45 haven't been used, so there's no mapping to them */
++enum nfp_ethtool_link_mode_list {
++ NFP_MEDIA_W0_RJ45_10M,
++ NFP_MEDIA_W0_RJ45_10M_HD,
++ NFP_MEDIA_W0_RJ45_100M,
++ NFP_MEDIA_W0_RJ45_100M_HD,
++ NFP_MEDIA_W0_RJ45_1G,
++ NFP_MEDIA_W0_RJ45_2P5G,
++ NFP_MEDIA_W0_RJ45_5G,
++ NFP_MEDIA_W0_RJ45_10G,
++ NFP_MEDIA_1000BASE_CX,
++ NFP_MEDIA_1000BASE_KX,
++ NFP_MEDIA_10GBASE_KX4,
++ NFP_MEDIA_10GBASE_KR,
++ NFP_MEDIA_10GBASE_CX4,
++ NFP_MEDIA_10GBASE_CR,
++ NFP_MEDIA_10GBASE_SR,
++ NFP_MEDIA_10GBASE_ER,
++ NFP_MEDIA_25GBASE_KR,
++ NFP_MEDIA_25GBASE_KR_S,
++ NFP_MEDIA_25GBASE_CR,
++ NFP_MEDIA_25GBASE_CR_S,
++ NFP_MEDIA_25GBASE_SR,
++ NFP_MEDIA_40GBASE_CR4,
++ NFP_MEDIA_40GBASE_KR4,
++ NFP_MEDIA_40GBASE_SR4,
++ NFP_MEDIA_40GBASE_LR4,
++ NFP_MEDIA_50GBASE_KR,
++ NFP_MEDIA_50GBASE_SR,
++ NFP_MEDIA_50GBASE_CR,
++ NFP_MEDIA_50GBASE_LR,
++ NFP_MEDIA_50GBASE_ER,
++ NFP_MEDIA_50GBASE_FR,
++ NFP_MEDIA_100GBASE_KR4,
++ NFP_MEDIA_100GBASE_SR4,
++ NFP_MEDIA_100GBASE_CR4,
++ NFP_MEDIA_100GBASE_KP4,
++ NFP_MEDIA_100GBASE_CR10,
++ NFP_MEDIA_LINK_MODES_NUMBER
++};
++
+ #define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+ #define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+ #define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+@@ -256,6 +302,16 @@ enum nfp_nsp_sensor_id {
+ int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
++struct nfp_eth_media_buf {
++ u8 eth_index;
++ u8 reserved[7];
++ __le64 supported_modes[2];
++ __le64 advertised_modes[2];
++};
++
++int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size);
++int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm);
++
+ #define NFP_NSP_VERSION_BUFSZ 1024 /* reasonable size, not in the ABI */
+
+ enum nfp_nsp_versions {
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+index bb64efec4c46b..570ac1bb2122f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+@@ -647,3 +647,29 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
+ }
++
++int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm)
++{
++ struct nfp_nsp *nsp;
++ int ret;
++
++ nsp = nfp_nsp_open(cpp);
++ if (IS_ERR(nsp)) {
++ nfp_err(cpp, "Failed to access the NSP: %pe\n", nsp);
++ return PTR_ERR(nsp);
++ }
++
++ if (!nfp_nsp_has_read_media(nsp)) {
++ nfp_warn(cpp, "Reading media link modes not supported. Please update flash\n");
++ ret = -EOPNOTSUPP;
++ goto exit_close_nsp;
++ }
++
++ ret = nfp_nsp_read_media(nsp, ethm, sizeof(*ethm));
++ if (ret)
++ nfp_err(cpp, "Reading media link modes failed: %pe\n", ERR_PTR(ret));
++
++exit_close_nsp:
++ nfp_nsp_close(nsp);
++ return ret;
++}
+--
+2.39.0
+
--- /dev/null
+From d306f8989aff77a35a0ea9eedf9446307d8571cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Jan 2023 23:47:53 +1100
+Subject: powerpc/64s/radix: Fix RWX mapping with relocated kernel
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 111bcb37385353f0510e5847d5abcd1c613dba23 ]
+
+If a relocatable kernel is loaded at a non-zero address and told not to
+relocate to zero (kdump or RELOCATABLE_TEST), the mapping of the
+interrupt code at zero is left with RWX permissions.
+
+That is a security weakness, and leads to a warning at boot if
+CONFIG_DEBUG_WX is enabled:
+
+ powerpc/mm: Found insecure W+X mapping at address 00000000056435bc/0xc000000000000000
+ WARNING: CPU: 1 PID: 1 at arch/powerpc/mm/ptdump/ptdump.c:193 note_page+0x484/0x4c0
+ CPU: 1 PID: 1 Comm: swapper/0 Not tainted 6.2.0-rc1-00001-g8ae8e98aea82-dirty #175
+ Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,git-dd0dca hv:linux,kvm pSeries
+ NIP: c0000000004a1c34 LR: c0000000004a1c30 CTR: 0000000000000000
+ REGS: c000000003503770 TRAP: 0700 Not tainted (6.2.0-rc1-00001-g8ae8e98aea82-dirty)
+ MSR: 8000000002029033 <SF,VEC,EE,ME,IR,DR,RI,LE> CR: 24000220 XER: 00000000
+ CFAR: c000000000545a58 IRQMASK: 0
+ ...
+ NIP note_page+0x484/0x4c0
+ LR note_page+0x480/0x4c0
+ Call Trace:
+ note_page+0x480/0x4c0 (unreliable)
+ ptdump_pmd_entry+0xc8/0x100
+ walk_pgd_range+0x618/0xab0
+ walk_page_range_novma+0x74/0xc0
+ ptdump_walk_pgd+0x98/0x170
+ ptdump_check_wx+0x94/0x100
+ mark_rodata_ro+0x30/0x70
+ kernel_init+0x78/0x1a0
+ ret_from_kernel_thread+0x5c/0x64
+
+The fix has two parts. Firstly the pages from zero up to the end of
+interrupts need to be marked read-only, so that they are left with R-X
+permissions. Secondly the mapping logic needs to be taught to ensure
+there is a page boundary at the end of the interrupt region, so that the
+permission change only applies to the interrupt text, and not the region
+following it.
+
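+A small sketch of the second part, i.e. treating the end of the
+interrupt code as an extra mapping boundary when the kernel runs
+relocated; it mirrors the hunk below, and the addresses are made up:
+
+  /* boundary.c - clamp the next mapping boundary so a permission change
+   * starting at 0 stops exactly at the end of the interrupt text. */
+  #include <stdio.h>
+
+  static unsigned long next_boundary(unsigned long addr, unsigned long end,
+                                     unsigned long stext_phys,
+                                     unsigned long end_intr)
+  {
+      if (stext_phys != 0) {          /* relocatable kernel, non-zero load */
+          if (addr < end_intr)        /* end of interrupts is a boundary */
+              return end_intr;
+          if (addr < stext_phys)      /* start of relocated text, too */
+              return stext_phys;
+      }
+      return end;
+  }
+
+  int main(void)
+  {
+      /* pretend interrupts end at 0x4000 and the kernel sits at 0x2000000 */
+      printf("%#lx\n", next_boundary(0x0, 0x10000000UL, 0x2000000UL, 0x4000UL));
+      return 0;
+  }
+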
+Fixes: c55d7b5e6426 ("powerpc: Remove STRICT_KERNEL_RWX incompatibility with RELOCATABLE")
+Reported-by: Sachin Sant <sachinp@linux.ibm.com>
+Tested-by: Sachin Sant <sachinp@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20230110124753.1325426-2-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/mm/book3s64/radix_pgtable.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 5a2384ed17279..26245aaf12b8b 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
+ end = (unsigned long)__end_rodata;
+
+ radix__change_memory_range(start, end, _PAGE_WRITE);
++
++ for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
++ end = start + PAGE_SIZE;
++ if (overlaps_interrupt_vector_text(start, end))
++ radix__change_memory_range(start, end, _PAGE_WRITE);
++ else
++ break;
++ }
+ }
+
+ void radix__mark_initmem_nx(void)
+@@ -268,6 +276,11 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
+
+ // Relocatable kernel running at non-zero real address
+ if (stext_phys != 0) {
++ // The end of interrupts code at zero is a rodata boundary
++ unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
++ if (addr < end_intr)
++ return end_intr;
++
+ // Start of relocated kernel text is a rodata boundary
+ if (addr < stext_phys)
+ return stext_phys;
+--
+2.39.0
+
--- /dev/null
+From 7b0be02212b672ba8fde4a92289e80799df9e37a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Dec 2022 12:29:37 -0500
+Subject: powerpc: dts: t208x: Disable 10G on MAC1 and MAC2
+
+From: Sean Anderson <sean.anderson@seco.com>
+
+[ Upstream commit 8d8bee13ae9e316443c6666286360126a19c8d94 ]
+
+There aren't enough resources to run these ports at 10G speeds. Disable
+10G for these ports, reverting to the previous speed.
+
+Fixes: 36926a7d70c2 ("powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G")
+Reported-by: Camelia Alexandra Groza <camelia.groza@nxp.com>
+Signed-off-by: Sean Anderson <sean.anderson@seco.com>
+Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
+Tested-by: Camelia Groza <camelia.groza@nxp.com>
+Link: https://lore.kernel.org/r/20221216172937.2960054-1-sean.anderson@seco.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+index 74e17e134387d..27714dc2f04a5 100644
+--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
++++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+@@ -659,3 +659,19 @@
+ interrupts = <16 2 1 9>;
+ };
+ };
++
++&fman0_rx_0x08 {
++ /delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_tx_0x28 {
++ /delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_rx_0x09 {
++ /delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_tx_0x29 {
++ /delete-property/ fsl,fman-10g-port;
++};
+--
+2.39.0
+
--- /dev/null
+From fd45546e599fcf9dfbc6ef860e63ef57084e08ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 16:22:39 -0400
+Subject: powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G
+
+From: Sean Anderson <sean.anderson@seco.com>
+
+[ Upstream commit 36926a7d70c2d462fca1ed85bfee000d17fd8662 ]
+
+On the T208X SoCs, MAC1 and MAC2 support XGMII. Add some new MAC dtsi
+fragments, and mark the QMAN ports as 10G.
+
+Fixes: da414bb923d9 ("powerpc/mpc85xx: Add FSL QorIQ DPAA FMan support to the SoC device tree(s)")
+Signed-off-by: Sean Anderson <sean.anderson@seco.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi | 44 +++++++++++++++++++
+ .../boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi | 44 +++++++++++++++++++
+ arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 4 +-
+ 3 files changed, 90 insertions(+), 2 deletions(-)
+ create mode 100644 arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
+ create mode 100644 arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
+
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
+new file mode 100644
+index 0000000000000..437dab3fc0176
+--- /dev/null
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
++/*
++ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
++ *
++ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ */
++
++fman@400000 {
++ fman0_rx_0x08: port@88000 {
++ cell-index = <0x8>;
++ compatible = "fsl,fman-v3-port-rx";
++ reg = <0x88000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ fman0_tx_0x28: port@a8000 {
++ cell-index = <0x28>;
++ compatible = "fsl,fman-v3-port-tx";
++ reg = <0xa8000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ ethernet@e0000 {
++ cell-index = <0>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe0000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy0>;
++ };
++
++ mdio@e1000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe1000 0x1000>;
++ fsl,erratum-a011043; /* must ignore read errors */
++
++ pcsphy0: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
+new file mode 100644
+index 0000000000000..ad116b17850a8
+--- /dev/null
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
++/*
++ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
++ *
++ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ */
++
++fman@400000 {
++ fman0_rx_0x09: port@89000 {
++ cell-index = <0x9>;
++ compatible = "fsl,fman-v3-port-rx";
++ reg = <0x89000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ fman0_tx_0x29: port@a9000 {
++ cell-index = <0x29>;
++ compatible = "fsl,fman-v3-port-tx";
++ reg = <0xa9000 0x1000>;
++ fsl,fman-10g-port;
++ };
++
++ ethernet@e2000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-memac";
++ reg = <0xe2000 0x1000>;
++ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
++ ptp-timer = <&ptp_timer0>;
++ pcsphy-handle = <&pcsphy1>;
++ };
++
++ mdio@e3000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++ reg = <0xe3000 0x1000>;
++ fsl,erratum-a011043; /* must ignore read errors */
++
++ pcsphy1: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ };
++};
+diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+index ecbb447920bc6..74e17e134387d 100644
+--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
++++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+@@ -609,8 +609,8 @@
+ /include/ "qoriq-bman1.dtsi"
+
+ /include/ "qoriq-fman3-0.dtsi"
+-/include/ "qoriq-fman3-0-1g-0.dtsi"
+-/include/ "qoriq-fman3-0-1g-1.dtsi"
++/include/ "qoriq-fman3-0-10g-2.dtsi"
++/include/ "qoriq-fman3-0-10g-3.dtsi"
+ /include/ "qoriq-fman3-0-1g-2.dtsi"
+ /include/ "qoriq-fman3-0-1g-3.dtsi"
+ /include/ "qoriq-fman3-0-1g-4.dtsi"
+--
+2.39.0
+
--- /dev/null
+From f494cf995565d88f52933194c7def9b135d2d363 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 22:45:33 +0200
+Subject: random: always mix cycle counter in add_latent_entropy()
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+[ Upstream commit d7bf7f3b813e3755226bcb5114ad2ac477514ebf ]
+
+add_latent_entropy() is called every time a process forks, in
+kernel_clone(). This in turn calls add_device_randomness() using the
+latent entropy global state. add_device_randomness() does two things:
+
+ 1) Mixes in a cycle counter, a sort of measurement of when the event
+    took place, the high precision bits of which are presumably
+    difficult to predict; and
+ 2) Mixes into the input pool the latent entropy argument passed.
+
+(2) is impossible without CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y. But (1) is
+always possible. However, currently CONFIG_GCC_PLUGIN_LATENT_ENTROPY=n
+disables both (1) and (2), instead of just (2).
+
+This commit causes the CONFIG_GCC_PLUGIN_LATENT_ENTROPY=n case to still
+do (1) by passing NULL (len 0) to add_device_randomness() when
+add_latent_entropy() is called.
+
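+A minimal userspace analogue of the change (the helpers are made up;
+in the kernel both parts go through add_device_randomness()):
+
+  /* latent_demo.c - keep one function and move the #ifdef inside it, so
+   * the always-available part (1) runs regardless of the config. */
+  #include <stdio.h>
+
+  static void mix_cycle_counter(void)  { puts("mix cycle counter"); }
+  static void mix_latent_entropy(void) { puts("mix latent entropy"); }
+
+  static inline void add_latent_entropy_demo(void)
+  {
+  #ifdef LATENT_ENTROPY_PLUGIN
+      mix_latent_entropy();   /* (2): only with the gcc plugin enabled */
+  #endif
+      mix_cycle_counter();    /* (1): always possible */
+  }
+
+  int main(void)
+  {
+      add_latent_entropy_demo();
+      return 0;
+  }
+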
+Cc: Dominik Brodowski <linux@dominikbrodowski.net>
+Cc: PaX Team <pageexec@freemail.hu>
+Cc: Emese Revfy <re.emese@gmail.com>
+Fixes: 38addce8b600 ("gcc-plugins: Add latent_entropy plugin")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/random.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/random.h b/include/linux/random.h
+index bd954ecbef901..51133627ba73a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ void add_interrupt_randomness(int irq) __latent_entropy;
+ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+
+-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ static inline void add_latent_entropy(void)
+ {
++#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+-}
+ #else
+-static inline void add_latent_entropy(void) { }
++ add_device_randomness(NULL, 0);
+ #endif
++}
+
+ #if IS_ENABLED(CONFIG_VMGENID)
+ void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+--
+2.39.0
+
--- /dev/null
+From d9d9c2ee2bd9adc43afa74981da8f9976c87eef0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 02:35:21 +0100
+Subject: Revert "netfilter: conntrack: fix bug in for_each_sctp_chunk"
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit bd0e06f0def75ba26572a94e5350324474a55562 ]
+
+There is no bug. If sch->length == 0, this would result in an infinite
+loop, but the first caller, do_basic_checks(), errors out in this case.
+
+After this change, packets with bogus zero-length chunks are no longer
+detected as invalid, so revert it and add a comment about the zero-length check.
+
+Fixes: 98ee00774525 ("netfilter: conntrack: fix bug in for_each_sctp_chunk")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_proto_sctp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 444f02271b36a..490b11192e8aa 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -142,10 +142,11 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
+ }
+ #endif
+
++/* do_basic_checks ensures sch->length > 0, do not use before */
+ #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
+ for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
+- ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
+- (sch)->length; \
++ (offset) < (skb)->len && \
++ ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))); \
+ (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
+
+ /* Some validity checks to make sure the chunks are fine */
+--
+2.39.0
+
--- /dev/null
+From 0633885931cfd0460c90f241464e1791a82954d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Oct 2022 12:45:41 -0700
+Subject: sched/psi: Stop relying on timer_pending() for poll_work rescheduling
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+[ Upstream commit 710ffe671e014d5ccbcff225130a178b088ef090 ]
+
+The PSI polling mechanism tries to minimize the number of wakeups to
+run psi_poll_work and currently relies on timer_pending() to detect
+when this work is already scheduled. This provides a window of
+opportunity for psi_group_change to schedule an immediate psi_poll_work
+after poll_timer_fn was called but before psi_poll_work could
+reschedule itself. Below is a depiction of this window:
+
+poll_timer_fn
+ wake_up_interruptible(&group->poll_wait);
+
+psi_poll_worker
+ wait_event_interruptible(group->poll_wait, ...)
+ psi_poll_work
+ psi_schedule_poll_work
+ if (timer_pending(&group->poll_timer)) return;
+ ...
+ mod_timer(&group->poll_timer, jiffies + delay);
+
+Prior to 461daba06bdc we relied on the poll_scheduled atomic, which was
+reset and set back inside psi_poll_work, so this race window was much
+smaller.
+The larger window causes an increased number of wakeups, and our partners
+report a visible power regression of ~10mA after applying 461daba06bdc.
+Bring back the poll_scheduled atomic and make this race window even
+narrower by resetting poll_scheduled only when we reach polling expiration
+time. This does not completely eliminate the possibility of extra wakeups
+caused by a race with psi_group_change; however, it limits them to at
+most one extra wakeup per tracking window (0.5s in the worst case).
+This patch also ensures correct ordering between clearing the
+poll_scheduled flag and obtaining changed_states by using a memory
+barrier. Correct ordering between updating changed_states and setting
+poll_scheduled is ensured by the atomic_xchg() operation.
+By tracing the number of immediate rescheduling attempts performed by
+psi_group_change and the number of these attempts blocked because the
+psi monitor was already active, we can assess the effects of this change:
+
+Before the patch:
+ Run#1 Run#2 Run#3
+Immediate reschedules attempted: 684365 1385156 1261240
+Immediate reschedules blocked: 682846 1381654 1258682
+Immediate reschedules (delta): 1519 3502 2558
+Immediate reschedules (% of attempted): 0.22% 0.25% 0.20%
+
+After the patch:
+ Run#1 Run#2 Run#3
+Immediate reschedules attempted: 882244 770298 426218
+Immediate reschedules blocked: 881996 769796 426074
+Immediate reschedules (delta): 248 502 144
+Immediate reschedules (% of attempted): 0.03% 0.07% 0.03%
+
+The number of non-blocked immediate reschedules dropped from 0.22-0.25%
+to 0.03-0.07%. The drop is attributed to the decrease in the race window
+size and the fact that we allow this race only when psi monitors reach
+polling window expiration time.
+
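+The guard itself boils down to an atomic exchange; a userspace sketch
+with C11 atomics (a stand-in for the kernel's atomic_xchg()):
+
+  /* poll_guard.c - only the first caller arms the timer; the poll worker
+   * can pass force=true to re-arm while the flag is still set. */
+  #include <stdatomic.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static atomic_int poll_scheduled;
+
+  static void schedule_poll_work(bool force)
+  {
+      /* the exchange is a full barrier and returns the previous value */
+      if (atomic_exchange(&poll_scheduled, 1) && !force)
+          return;                          /* already scheduled */
+      printf("arming poll timer (force=%d)\n", force);
+  }
+
+  int main(void)
+  {
+      schedule_poll_work(false);   /* arms the timer */
+      schedule_poll_work(false);   /* skipped: already pending */
+      schedule_poll_work(true);    /* poll worker keeps rescheduling */
+      return 0;
+  }
+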
+Fixes: 461daba06bdc ("psi: eliminate kthread_worker from psi trigger scheduling mechanism")
+Reported-by: Kathleen Chang <yt.chang@mediatek.com>
+Reported-by: Wenju Xu <wenju.xu@mediatek.com>
+Reported-by: Jonathan Chen <jonathan.jmchen@mediatek.com>
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Tested-by: SH Chen <show-hong.chen@mediatek.com>
+Link: https://lore.kernel.org/r/20221028194541.813985-1-surenb@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/psi_types.h | 1 +
+ kernel/sched/psi.c | 62 ++++++++++++++++++++++++++++++++-------
+ 2 files changed, 53 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
+index 6e43727350689..14a1ebb74e11f 100644
+--- a/include/linux/psi_types.h
++++ b/include/linux/psi_types.h
+@@ -177,6 +177,7 @@ struct psi_group {
+ struct timer_list poll_timer;
+ wait_queue_head_t poll_wait;
+ atomic_t poll_wakeup;
++ atomic_t poll_scheduled;
+
+ /* Protects data used by the monitor */
+ struct mutex trigger_lock;
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 48fedeee15c5b..e83c321461cf4 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -189,6 +189,7 @@ static void group_init(struct psi_group *group)
+ INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+ mutex_init(&group->avgs_lock);
+ /* Init trigger-related members */
++ atomic_set(&group->poll_scheduled, 0);
+ mutex_init(&group->trigger_lock);
+ INIT_LIST_HEAD(&group->triggers);
+ group->poll_min_period = U32_MAX;
+@@ -565,18 +566,17 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+ return now + group->poll_min_period;
+ }
+
+-/* Schedule polling if it's not already scheduled. */
+-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
++/* Schedule polling if it's not already scheduled or forced. */
++static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
++ bool force)
+ {
+ struct task_struct *task;
+
+ /*
+- * Do not reschedule if already scheduled.
+- * Possible race with a timer scheduled after this check but before
+- * mod_timer below can be tolerated because group->polling_next_update
+- * will keep updates on schedule.
++ * atomic_xchg should be called even when !force to provide a
++ * full memory barrier (see the comment inside psi_poll_work).
+ */
+- if (timer_pending(&group->poll_timer))
++ if (atomic_xchg(&group->poll_scheduled, 1) && !force)
+ return;
+
+ rcu_read_lock();
+@@ -588,12 +588,15 @@ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+ */
+ if (likely(task))
+ mod_timer(&group->poll_timer, jiffies + delay);
++ else
++ atomic_set(&group->poll_scheduled, 0);
+
+ rcu_read_unlock();
+ }
+
+ static void psi_poll_work(struct psi_group *group)
+ {
++ bool force_reschedule = false;
+ u32 changed_states;
+ u64 now;
+
+@@ -601,6 +604,43 @@ static void psi_poll_work(struct psi_group *group)
+
+ now = sched_clock();
+
++ if (now > group->polling_until) {
++ /*
++ * We are either about to start or might stop polling if no
++ * state change was recorded. Resetting poll_scheduled leaves
++ * a small window for psi_group_change to sneak in and schedule
++ * an immediate poll_work before we get to rescheduling. One
++ * potential extra wakeup at the end of the polling window
++ * should be negligible and polling_next_update still keeps
++ * updates correctly on schedule.
++ */
++ atomic_set(&group->poll_scheduled, 0);
++ /*
++ * A task change can race with the poll worker that is supposed to
++ * report on it. To avoid missing events, ensure ordering between
++ * poll_scheduled and the task state accesses, such that if the poll
++ * worker misses the state update, the task change is guaranteed to
++ * reschedule the poll worker:
++ *
++ * poll worker:
++ * atomic_set(poll_scheduled, 0)
++ * smp_mb()
++ * LOAD states
++ *
++ * task change:
++ * STORE states
++ * if atomic_xchg(poll_scheduled, 1) == 0:
++ * schedule poll worker
++ *
++ * The atomic_xchg() implies a full barrier.
++ */
++ smp_mb();
++ } else {
++ /* Polling window is not over, keep rescheduling */
++ force_reschedule = true;
++ }
++
++
+ collect_percpu_times(group, PSI_POLL, &changed_states);
+
+ if (changed_states & group->poll_states) {
+@@ -626,7 +666,8 @@ static void psi_poll_work(struct psi_group *group)
+ group->polling_next_update = update_triggers(group, now);
+
+ psi_schedule_poll_work(group,
+- nsecs_to_jiffies(group->polling_next_update - now) + 1);
++ nsecs_to_jiffies(group->polling_next_update - now) + 1,
++ force_reschedule);
+
+ out:
+ mutex_unlock(&group->trigger_lock);
+@@ -787,7 +828,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ write_seqcount_end(&groupc->seq);
+
+ if (state_mask & group->poll_states)
+- psi_schedule_poll_work(group, 1);
++ psi_schedule_poll_work(group, 1, false);
+
+ if (wake_clock && !delayed_work_pending(&group->avgs_work))
+ schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+@@ -941,7 +982,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
+ write_seqcount_end(&groupc->seq);
+
+ if (group->poll_states & (1 << PSI_IRQ_FULL))
+- psi_schedule_poll_work(group, 1);
++ psi_schedule_poll_work(group, 1, false);
+ } while ((group = group->parent));
+ }
+ #endif
+@@ -1328,6 +1369,7 @@ void psi_trigger_destroy(struct psi_trigger *t)
+ * can no longer be found through group->poll_task.
+ */
+ kthread_stop(task_to_destroy);
++ atomic_set(&group->poll_scheduled, 0);
+ }
+ kfree(t);
+ }
+--
+2.39.0
+
--- /dev/null
+From efa8db7e79f545227a86fcce1ef26831d47056af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Nov 2022 16:37:13 +0800
+Subject: scsi: hisi_sas: Fix SATA devices missing issue during I_T nexus reset
+
+From: Jie Zhan <zhanjie9@hisilicon.com>
+
+[ Upstream commit 3c2673a09cf1181318c07b7dbc1bc532ba3d33e3 ]
+
+SATA devices on an expander may be removed and not be found again when I_T
+nexus reset and revalidation are processed simultaneously.
+
+The issue comes from:
+
+ - Revalidation can remove SATA devices in link reset, e.g. in
+ hisi_sas_clear_nexus_ha().
+
+ - However, hisi_sas_debug_I_T_nexus_reset() polls the state of a SATA
+ device on an expander after sending link_reset, where it calls:
+ hisi_sas_debug_I_T_nexus_reset
+ sas_ata_wait_after_reset
+ ata_wait_after_reset
+ ata_wait_ready
+ smp_ata_check_ready
+ sas_ex_phy_discover
+ sas_ex_phy_discover_helper
+ sas_set_ex_phy
+
+ The ex_phy's change count is updated in sas_set_ex_phy(), so SATA
+ devices after a link reset may not be found later through revalidation.
+
+Similar issues were reported in:
+commit 0f3fce5cc77e ("[SCSI] libsas: fix ata_eh clobbering ex_phys via
+smp_ata_check_ready")
+commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery competing
+with ata error handling").
+
+To address this issue, in hisi_sas_debug_I_T_nexus_reset(), we now call
+smp_ata_check_ready_type(), which only polls the device type and does
+not update the libsas ex_phy data.
+
+Fixes: 71453bd9d1bf ("scsi: hisi_sas: Use sas_ata_wait_after_reset() in IT nexus reset")
+Signed-off-by: Jie Zhan <zhanjie9@hisilicon.com>
+Link: https://lore.kernel.org/r/20221118083714.4034612-5-zhanjie9@hisilicon.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/hisi_sas/hisi_sas_main.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 02fa3c00dcccf..a8142e2b96435 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1708,13 +1708,15 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
+ return rc;
+ }
+
++ /* Remote phy */
+ if (rc)
+ return rc;
+
+- /* Remote phy */
+ if (dev_is_sata(device)) {
+- rc = sas_ata_wait_after_reset(device,
+- HISI_SAS_WAIT_PHYUP_TIMEOUT);
++ struct ata_link *link = &device->sata_dev.ap->link;
++
++ rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
++ smp_ata_check_ready_type);
+ } else {
+ msleep(2000);
+ }
+--
+2.39.0
+
--- /dev/null
+From 98b68ffd29a1e4c3aa7b6167b98cae369c329f16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Nov 2022 16:37:12 +0800
+Subject: scsi: libsas: Add smp_ata_check_ready_type()
+
+From: Jie Zhan <zhanjie9@hisilicon.com>
+
+[ Upstream commit 9181ce3cb5d96f0ee28246a857ca651830fa3746 ]
+
+Create function smp_ata_check_ready_type() for LLDDs to wait for SATA
+devices to come up after a link reset.
+
+Signed-off-by: Jie Zhan <zhanjie9@hisilicon.com>
+Link: https://lore.kernel.org/r/20221118083714.4034612-4-zhanjie9@hisilicon.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: 3c2673a09cf1 ("scsi: hisi_sas: Fix SATA devices missing issue during I_T nexus reset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libsas/sas_ata.c | 25 +++++++++++++++++++++++++
+ drivers/scsi/libsas/sas_expander.c | 4 ++--
+ drivers/scsi/libsas/sas_internal.h | 2 ++
+ include/scsi/sas_ata.h | 6 ++++++
+ 4 files changed, 35 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index d35c9296f7388..2fd55ef9ffca5 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -287,6 +287,31 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
+ return 1;
+ }
+
++int smp_ata_check_ready_type(struct ata_link *link)
++{
++ struct domain_device *dev = link->ap->private_data;
++ struct sas_phy *phy = sas_get_local_phy(dev);
++ struct domain_device *ex_dev = dev->parent;
++ enum sas_device_type type = SAS_PHY_UNUSED;
++ u8 sas_addr[SAS_ADDR_SIZE];
++ int res;
++
++ res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
++ sas_put_local_phy(phy);
++ if (res)
++ return res;
++
++ switch (type) {
++ case SAS_SATA_PENDING:
++ return 0;
++ case SAS_END_DEVICE:
++ return 1;
++ default:
++ return -ENODEV;
++ }
++}
++EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);
++
+ static int smp_ata_check_ready(struct ata_link *link)
+ {
+ int res;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 5ce2518301040..63a23251fb1d8 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -1693,8 +1693,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
+ return res;
+ }
+
+-static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+- u8 *sas_addr, enum sas_device_type *type)
++int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
++ u8 *sas_addr, enum sas_device_type *type)
+ {
+ int res;
+ struct smp_disc_resp *disc_resp;
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index 8d0ad3abc7b5c..a94bd0790b055 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -84,6 +84,8 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+ int sas_ex_phy_discover(struct domain_device *dev, int single);
+ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ struct smp_rps_resp *rps_resp);
++int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
++ u8 *sas_addr, enum sas_device_type *type);
+ int sas_try_ata_reset(struct asd_sas_phy *phy);
+ void sas_hae_reset(struct work_struct *work);
+
+diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
+index a1df4f9d57a31..ec646217e7f6e 100644
+--- a/include/scsi/sas_ata.h
++++ b/include/scsi/sas_ata.h
+@@ -35,6 +35,7 @@ void sas_ata_end_eh(struct ata_port *ap);
+ int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ int force_phy_id);
+ int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
++int smp_ata_check_ready_type(struct ata_link *link);
+ #else
+
+
+@@ -98,6 +99,11 @@ static inline int sas_ata_wait_after_reset(struct domain_device *dev,
+ {
+ return -ETIMEDOUT;
+ }
++
++static inline int smp_ata_check_ready_type(struct ata_link *link)
++{
++ return 0;
++}
+ #endif
+
+ #endif /* _SAS_ATA_H_ */
+--
+2.39.0
+
--- /dev/null
+From fb6b943ac8df6a9a1ad1957000b01c7057d71d5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Jan 2023 04:04:50 -0500
+Subject: selftests: kvm: move declaration at the beginning of main()
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 50aa870ba2f7735f556e52d15f61cd0f359c4c0b ]
+
+Placing the declaration of evt_reset right after the "done:" label is
+pedantically invalid according to the C standard. While GCC does not
+really care and only warns with -Wpedantic, clang ignores the
+declaration altogether and fails with an error:
+
+x86_64/xen_shinfo_test.c:965:2: error: expected expression
+ struct kvm_xen_hvm_attr evt_reset = {
+ ^
+x86_64/xen_shinfo_test.c:969:38: error: use of undeclared identifier evt_reset
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
+ ^
+
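+The underlying rule (before C23) is that a label must be followed by a
+statement, and a declaration is not a statement. A minimal standalone
+reproducer of the same pattern (hypothetical example, not taken from
+the selftest):
+
+    int f(int x)
+    {
+        if (x)
+            goto done;
+    done:
+        int y = x + 1;  /* clang: error: expected expression */
+        return y;
+    }
+
+Moving the declaration to the top of the function (as done here) or
+placing a null statement ";" right after the label avoids the problem.
+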
+Reported-by: Yu Zhang <yu.c.zhang@linux.intel.com>
+Reported-by: Sean Christopherson <seanjc@google.com>
+Fixes: a79b53aaaab5 ("KVM: x86: fix deadlock for KVM_XEN_EVTCHN_RESET", 2022-12-28)
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+index 8383457e66990..0668ec542cccd 100644
+--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
++++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+@@ -428,6 +428,7 @@ static void *juggle_shinfo_state(void *arg)
+ int main(int argc, char *argv[])
+ {
+ struct timespec min_ts, max_ts, vm_ts;
++ struct kvm_xen_hvm_attr evt_reset;
+ struct kvm_vm *vm;
+ pthread_t thread;
+ bool verbose;
+@@ -942,10 +943,8 @@ int main(int argc, char *argv[])
+ }
+
+ done:
+- struct kvm_xen_hvm_attr evt_reset = {
+- .type = KVM_XEN_ATTR_TYPE_EVTCHN,
+- .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
+- };
++ evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
++ evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
+
+ alarm(0);
+--
+2.39.0
+
--- /dev/null
+drm-etnaviv-don-t-truncate-physical-page-address.patch
+wifi-ath11k-fix-warning-in-dma_free_coherent-of-memo.patch
+wifi-rtl8xxxu-gen2-turn-on-the-rate-control.patch
+drm-edid-fix-minimum-bpc-supported-with-dsc1.2-for-h.patch
+clk-mxl-switch-from-direct-readl-writel-based-io-to-.patch
+clk-mxl-remove-redundant-spinlocks.patch
+clk-mxl-add-option-to-override-gate-clks.patch
+clk-mxl-fix-a-clk-entry-by-adding-relevant-flags.patch
+powerpc-dts-t208x-mark-mac1-and-mac2-as-10g.patch
+clk-mxl-syscon_node_to_regmap-returns-error-pointers.patch
+sched-psi-stop-relying-on-timer_pending-for-poll_wor.patch
+random-always-mix-cycle-counter-in-add_latent_entrop.patch
+scsi-libsas-add-smp_ata_check_ready_type.patch
+scsi-hisi_sas-fix-sata-devices-missing-issue-during-.patch
+spi-mediatek-enable-irq-when-pdata-is-ready.patch
+docs-perf-fix-pmu-instance-name-of-hisi-pcie-pmu.patch
+kvm-x86-fail-emulation-during-emultype_skip-on-any-e.patch
+kvm-svm-skip-wrmsr-fastpath-on-vm-exit-if-next-rip-i.patch
+kvm-vmx-execute-ibpb-on-emulated-vm-exit-when-guest-.patch
+can-kvaser_usb-hydra-help-gcc-13-to-figure-out-cmd_l.patch
+powerpc-dts-t208x-disable-10g-on-mac1-and-mac2.patch
+spi-mediatek-enable-irq-before-the-spi-registration.patch
+drm-i915-remove-__maybe_unused-from-mtl_info.patch
+kvm-x86-fix-deadlock-for-kvm_xen_evtchn_reset.patch
+selftests-kvm-move-declaration-at-the-beginning-of-m.patch
+netfilter-conntrack-fix-bug-in-for_each_sctp_chunk.patch
+powerpc-64s-radix-fix-rwx-mapping-with-relocated-ker.patch
+revert-netfilter-conntrack-fix-bug-in-for_each_sctp_.patch
+nfp-ethtool-support-reporting-link-modes.patch
+nfp-ethtool-fix-the-bug-of-setting-unsupported-port-.patch
--- /dev/null
+From a3e79533df60a5597832037e09c84132b8c8d7bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Dec 2022 09:37:12 +0100
+Subject: spi: mediatek: Enable irq before the spi registration
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit b24cded8c065d7cef8690b2c7b82b828cce57708 ]
+
+If the irq is enabled after the spi controller is registered, there can
+be a race with the initialization of the devices on the spi bus.
+
+E.g.:
+mtk-spi 1100a000.spi: spi-mem transfer timeout
+spi-nor: probe of spi0.0 failed with error -110
+Unable to handle kernel NULL pointer dereference at virtual address
+0000000000000010
+...
+Call trace:
+ mtk_spi_can_dma+0x0/0x2c
+
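+The fix is purely an ordering change in mtk_spi_probe(), condensed here
+from the hunks below (error paths trimmed):
+
+    /* request the IRQ before any child device can start probing ... */
+    ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+                           IRQF_TRIGGER_NONE, dev_name(dev), master);
+    if (ret)
+        return dev_err_probe(dev, ret, "failed to register irq\n");
+
+    pm_runtime_enable(dev);
+
+    /* ... because registering the controller may immediately probe
+     * spi-nor and friends, whose transfers depend on the IRQ handler */
+    ret = devm_spi_register_master(dev, master);
+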
+Fixes: c6f7874687f7 ("spi: mediatek: Enable irq when pdata is ready")
+Reported-by: Daniel Golle <daniel@makrotopia.org>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Tested-by: Daniel Golle <daniel@makrotopia.org>
+Link: https://lore.kernel.org/r/20221225-mtk-spi-fixes-v1-0-bb6c14c232f8@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-mt65xx.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 6de8360e5c2a9..9eab6c20dbc56 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1253,6 +1253,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ addr_bits, ret);
+
++ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
++ IRQF_TRIGGER_NONE, dev_name(dev), master);
++ if (ret)
++ return dev_err_probe(dev, ret, "failed to register irq\n");
++
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+@@ -1261,13 +1266,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, ret, "failed to register master\n");
+ }
+
+- ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+- IRQF_TRIGGER_NONE, dev_name(dev), master);
+- if (ret) {
+- pm_runtime_disable(dev);
+- return dev_err_probe(dev, ret, "failed to register irq\n");
+- }
+-
+ return 0;
+ }
+
+--
+2.39.0
+
--- /dev/null
+From 8f586773b6314db19589e23adcf872b6c851b541 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Nov 2022 12:00:01 +0100
+Subject: spi: mediatek: Enable irq when pdata is ready
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit c6f7874687f7027d7c4b2f53ff6e4d22850f915d ]
+
+If the device does not come straight from reset, we might receive an IRQ
+before we are ready to handle it.
+
+This fixes the following crash:
+
+[ 0.832328] Unable to handle kernel read from unreadable memory at virtual address 0000000000000010
+[ 1.040343] Call trace:
+[ 1.040347] mtk_spi_can_dma+0xc/0x40
+...
+[ 1.262265] start_kernel+0x338/0x42c
+
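+The change is again only about probe ordering: the IRQ is now requested
+at the very end of mtk_spi_probe(), once the controller data the handler
+relies on has been set up. Condensed from the hunks below:
+
+    ret = devm_spi_register_master(dev, master);
+    /* (registration error handling unchanged, omitted here) */
+
+    /* only now is it safe for mtk_spi_interrupt() to run */
+    ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+                           IRQF_TRIGGER_NONE, dev_name(dev), master);
+    if (ret) {
+        pm_runtime_disable(dev);
+        return dev_err_probe(dev, ret, "failed to register irq\n");
+    }
+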
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://lore.kernel.org/r/20221128-spi-mt65xx-v1-0-509266830665@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-mt65xx.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index d6aff909fc365..6de8360e5c2a9 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1192,11 +1192,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ else
+ dma_set_max_seg_size(dev, SZ_256K);
+
+- ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+- IRQF_TRIGGER_NONE, dev_name(dev), master);
+- if (ret)
+- return dev_err_probe(dev, ret, "failed to register irq\n");
+-
+ mdata->parent_clk = devm_clk_get(dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk))
+ return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
+@@ -1266,6 +1261,13 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, ret, "failed to register master\n");
+ }
+
++ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
++ IRQF_TRIGGER_NONE, dev_name(dev), master);
++ if (ret) {
++ pm_runtime_disable(dev);
++ return dev_err_probe(dev, ret, "failed to register irq\n");
++ }
++
+ return 0;
+ }
+
+--
+2.39.0
+
--- /dev/null
+From a883a28f144f2a2ce230e3695a9dc1bd4fce9868 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 03:38:32 -0400
+Subject: wifi: ath11k: fix warning in dma_free_coherent() of memory chunks
+ while recovery
+
+From: Wen Gong <quic_wgong@quicinc.com>
+
+[ Upstream commit f74878433d5ade360447da5d92e9c2e535780d80 ]
+
+Commit 26f3a021b37c ("ath11k: allocate smaller chunks of memory for
+firmware") and commit f6f92968e1e5 ("ath11k: qmi: try to allocate a
+big block of DMA memory first") changed ath11k to allocate the target
+memory chunks twice during WLAN load: the first attempt may fail
+because a large block is requested, and the driver then falls back to
+allocating many small chunks on the second attempt, as in the log
+below.
+
+1st time failed:
+[10411.640620] ath11k_pci 0000:05:00.0: qmi firmware request memory request
+[10411.640625] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 6881280
+[10411.640630] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 3784704
+[10411.640658] ath11k_pci 0000:05:00.0: qmi dma allocation failed (6881280 B type 1), will try later with small size
+[10411.640671] ath11k_pci 0000:05:00.0: qmi delays mem_request 2
+[10411.640677] ath11k_pci 0000:05:00.0: qmi respond memory request delayed 1
+2nd time success:
+[10411.642004] ath11k_pci 0000:05:00.0: qmi firmware request memory request
+[10411.642008] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642012] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642014] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642016] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642018] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642020] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642022] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642024] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642027] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642029] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+[10411.642031] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 458752
+[10411.642033] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 131072
+[10411.642035] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642037] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642039] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642041] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642043] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642045] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 524288
+[10411.642047] ath11k_pci 0000:05:00.0: qmi mem seg type 4 size 491520
+[10411.642049] ath11k_pci 0000:05:00.0: qmi mem seg type 1 size 524288
+
+Commit 5962f370ce41 ("ath11k: Reuse the available memory after
+firmware reload") then made recovery skip ath11k_qmi_free_resource(),
+which frees the memory chunks. After that change, a recovery test on
+WCN6855 triggered the warning below every time and eventually made
+recovery fail.
+
+[ 159.570318] BUG: Bad page state in process kworker/u16:5 pfn:33300
+[ 159.570320] page:0000000096ffdbb9 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x33300
+[ 159.570324] flags: 0xfffffc0000000(node=0|zone=1|lastcpupid=0x1fffff)
+[ 159.570329] raw: 000fffffc0000000 0000000000000000 dead000000000122 0000000000000000
+[ 159.570332] raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+[ 159.570334] page dumped because: nonzero _refcount
+[ 159.570440] firewire_ohci syscopyarea sysfillrect psmouse sdhci_pci ahci sysimgblt firewire_core fb_sys_fops libahci crc_itu_t cqhci drm sdhci e1000e wmi video
+[ 159.570460] CPU: 2 PID: 217 Comm: kworker/u16:5 Kdump: loaded Tainted: G B 5.19.0-rc1-wt-ath+ #3
+[ 159.570465] Hardware name: LENOVO 418065C/418065C, BIOS 83ET63WW (1.33 ) 07/29/2011
+[ 159.570467] Workqueue: qmi_msg_handler qmi_data_ready_work [qmi_helpers]
+[ 159.570475] Call Trace:
+[ 159.570476] <TASK>
+[ 159.570478] dump_stack_lvl+0x49/0x5f
+[ 159.570486] dump_stack+0x10/0x12
+[ 159.570493] bad_page+0xab/0xf0
+[ 159.570502] check_free_page_bad+0x66/0x70
+[ 159.570511] __free_pages_ok+0x530/0x9a0
+[ 159.570517] ? __dev_printk+0x58/0x6b
+[ 159.570525] ? _dev_printk+0x56/0x72
+[ 159.570534] ? qmi_decode+0x119/0x470 [qmi_helpers]
+[ 159.570543] __free_pages+0x91/0xd0
+[ 159.570548] dma_free_contiguous+0x50/0x60
+[ 159.570556] dma_direct_free+0xe5/0x140
+[ 159.570564] dma_free_attrs+0x35/0x50
+[ 159.570570] ath11k_qmi_msg_mem_request_cb+0x2ae/0x3c0 [ath11k]
+[ 159.570620] qmi_invoke_handler+0xac/0xe0 [qmi_helpers]
+[ 159.570630] qmi_handle_message+0x6d/0x180 [qmi_helpers]
+[ 159.570643] qmi_data_ready_work+0x2ca/0x440 [qmi_helpers]
+[ 159.570656] process_one_work+0x227/0x440
+[ 159.570667] worker_thread+0x31/0x3d0
+[ 159.570676] ? process_one_work+0x440/0x440
+[ 159.570685] kthread+0xfe/0x130
+[ 159.570692] ? kthread_complete_and_exit+0x20/0x20
+[ 159.570701] ret_from_fork+0x22/0x30
+[ 159.570712] </TASK>
+
+The reason is that when WLAN recovery starts, the type, size and count
+of the memory chunks differ between the 1st and 2nd
+QMI_WLFW_REQUEST_MEM_IND messages, so the size passed to
+dma_free_coherent() can be wrong. For chunk[1], the actual DMA size is
+524288, allocated in the 2nd pass of the initial WLAN load, while the
+size passed to dma_free_coherent() is 3784704, obtained in the 1st pass
+of the recovery phase; hence the warning above.
+
+Change dma_free_coherent() to take prev_size of struct target_mem_chunk
+as the size parameter, since prev_size is the real size of the last
+load/recovery. Also check both type and size of struct target_mem_chunk
+before reusing a chunk, to avoid a buffer size mismatch for the target.
+With that, the warning disappears and recovery succeeds. When the 1st
+QMI_WLFW_REQUEST_MEM_IND for recovery arrives, chunk[0] is freed in
+ath11k_qmi_alloc_target_mem_chunk() and dma_alloc_coherent() then fails
+because of the large size, chunk[1] is freed in
+ath11k_qmi_free_target_mem_chunk(), and the remaining 18 chunks are
+reused for the 2nd QMI_WLFW_REQUEST_MEM_IND message.
+
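+The resulting logic, condensed from the two hunks below (a sketch, not
+the whole allocator):
+
+    /* ath11k_qmi_free_target_mem_chunk(): free with the size the buffer
+     * was actually allocated with */
+    dma_free_coherent(ab->dev,
+                      ab->qmi.target_mem[i].prev_size,
+                      ab->qmi.target_mem[i].vaddr,
+                      ab->qmi.target_mem[i].paddr);
+
+    /* ath11k_qmi_alloc_target_mem_chunk(): reuse only on an exact match */
+    if (chunk->vaddr) {
+        if (chunk->prev_type == chunk->type &&
+            chunk->prev_size == chunk->size)
+            continue;
+
+        /* cannot reuse the existing chunk */
+        dma_free_coherent(ab->dev, chunk->prev_size,
+                          chunk->vaddr, chunk->paddr);
+        chunk->vaddr = NULL;
+    }
+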
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3
+
+Fixes: 5962f370ce41 ("ath11k: Reuse the available memory after firmware reload")
+Signed-off-by: Wen Gong <quic_wgong@quicinc.com>
+Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
+Link: https://lore.kernel.org/r/20220928073832.16251-1-quic_wgong@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath11k/qmi.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 7cc5fa3251521..381c6b390dd78 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -1964,7 +1964,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+ continue;
+
+ dma_free_coherent(ab->dev,
+- ab->qmi.target_mem[i].size,
++ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].vaddr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].vaddr = NULL;
+@@ -1985,12 +1985,12 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+ * in such case, no need to allocate memory for FW again.
+ */
+ if (chunk->vaddr) {
+- if (chunk->prev_type == chunk->type ||
++ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ continue;
+
+ /* cannot reuse the existing chunk */
+- dma_free_coherent(ab->dev, chunk->size,
++ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->vaddr, chunk->paddr);
+ chunk->vaddr = NULL;
+ }
+--
+2.39.0
+
--- /dev/null
+From c4c3dd82dd04fc4e3361b790d4b86eaad3d8866f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 23:36:51 +0300
+Subject: wifi: rtl8xxxu: gen2: Turn on the rate control
+
+From: Bitterblue Smith <rtl8821cerfe2@gmail.com>
+
+[ Upstream commit 791082ec0ab843e0be07c8ce3678e4c2afd2e33d ]
+
+Re-enable the function rtl8xxxu_gen2_report_connect.
+
+It informs the firmware when connecting to a network. This makes the
+firmware enable rate control, which makes uploads faster.
+
+It also informs the firmware when disconnecting from a network. In the
+past this made reconnecting impossible because it was sending the
+auth on queue 0x7 (TXDESC_QUEUE_VO) instead of queue 0x12
+(TXDESC_QUEUE_MGNT):
+
+wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 1/3)
+wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 2/3)
+wlp0s20f0u3: send auth to 90:55:de:__:__:__ (try 3/3)
+wlp0s20f0u3: authentication with 90:55:de:__:__:__ timed out
+
+Probably the firmware disables the unnecessary TX queues when it
+knows it's disconnected.
+
+However, this was fixed in commit edd5747aa12e ("wifi: rtl8xxxu: Fix
+skb misuse in TX queue selection").
+
+Fixes: c59f13bbead4 ("rtl8xxxu: Work around issue with 8192eu and 8723bu devices not reconnecting")
+Signed-off-by: Bitterblue Smith <rtl8821cerfe2@gmail.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/43200afc-0c65-ee72-48f8-231edd1df493@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index e9c1b62c9c3c2..e445084e358f9 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -4389,12 +4389,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
+ {
+-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
+ /*
+- * Barry Day reports this causes issues with 8192eu and 8723bu
+- * devices reconnecting. The reason for this is unclear, but
+- * until it is better understood, leave the code in place but
+- * disabled, so it is not lost.
++ * The firmware turns on the rate control when it knows it's
++ * connected to a network.
+ */
+ struct h2c_cmd h2c;
+
+@@ -4407,7 +4404,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ h2c.media_status_rpt.parm &= ~BIT(0);
+
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
+-#endif
+ }
+
+ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
+--
+2.39.0
+