+++ /dev/null
-From 80763571c2d254f14c943d127dc4f57de7c3d7c0 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 29 Sep 2021 11:42:51 +0800
-Subject: arm64: dts: qcom: Fix IPQ8074 PCIe PHY nodes
-
-From: Shawn Guo <shawn.guo@linaro.org>
-
-[ Upstream commit 942bcd33ed455ad40b71a59901bd926bbf4a500e ]
-
-IPQ8074 PCIe PHY nodes are broken in many ways:
-
-- '#address-cells', '#size-cells' and 'ranges' are missing.
-- The child phy/lane node is missing, and child properties like
- '#phy-cells' and 'clocks' are mistakenly put into the parent node.
-- The clock properties for the parent node are missing.
-
-Fix them so that the nodes comply with the binding schema.
-
-Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
-Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
-Link: https://lore.kernel.org/r/20210929034253.24570-9-shawn.guo@linaro.org
-Stable-dep-of: ed22cc93abae ("arm64: dts: qcom: ipq8074: fix PCIe PHY serdes size")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/arm64/boot/dts/qcom/ipq8074.dtsi | 46 +++++++++++++++++++++------
- 1 file changed, 36 insertions(+), 10 deletions(-)
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-index 99e2488b92dc..e8b3ecb591dd 100644
---- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-@@ -167,34 +167,60 @@
- resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
- };
-
-- pcie_phy0: phy@86000 {
-+ pcie_qmp0: phy@86000 {
- compatible = "qcom,ipq8074-qmp-pcie-phy";
- reg = <0x00086000 0x1000>;
-- #phy-cells = <0>;
-- clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
-- clock-names = "pipe_clk";
-- clock-output-names = "pcie20_phy0_pipe_clk";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ ranges;
-
-+ clocks = <&gcc GCC_PCIE0_AUX_CLK>,
-+ <&gcc GCC_PCIE0_AHB_CLK>;
-+ clock-names = "aux", "cfg_ahb";
- resets = <&gcc GCC_PCIE0_PHY_BCR>,
- <&gcc GCC_PCIE0PHY_PHY_BCR>;
- reset-names = "phy",
- "common";
- status = "disabled";
-+
-+ pcie_phy0: phy@86200 {
-+ reg = <0x86200 0x16c>,
-+ <0x86400 0x200>,
-+ <0x86800 0x4f4>;
-+ #phy-cells = <0>;
-+ #clock-cells = <0>;
-+ clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
-+ clock-names = "pipe0";
-+ clock-output-names = "pcie_0_pipe_clk";
-+ };
- };
-
-- pcie_phy1: phy@8e000 {
-+ pcie_qmp1: phy@8e000 {
- compatible = "qcom,ipq8074-qmp-pcie-phy";
- reg = <0x0008e000 0x1000>;
-- #phy-cells = <0>;
-- clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
-- clock-names = "pipe_clk";
-- clock-output-names = "pcie20_phy1_pipe_clk";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ ranges;
-
-+ clocks = <&gcc GCC_PCIE1_AUX_CLK>,
-+ <&gcc GCC_PCIE1_AHB_CLK>;
-+ clock-names = "aux", "cfg_ahb";
- resets = <&gcc GCC_PCIE1_PHY_BCR>,
- <&gcc GCC_PCIE1PHY_PHY_BCR>;
- reset-names = "phy",
- "common";
- status = "disabled";
-+
-+ pcie_phy1: phy@8e200 {
-+ reg = <0x8e200 0x16c>,
-+ <0x8e400 0x200>,
-+ <0x8e800 0x4f4>;
-+ #phy-cells = <0>;
-+ #clock-cells = <0>;
-+ clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
-+ clock-names = "pipe0";
-+ clock-output-names = "pcie_1_pipe_clk";
-+ };
- };
-
- tlmm: pinctrl@1000000 {
---
-2.35.1
-
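[ Note: with '#phy-cells' now on the child lane node, a PCIe controller
  references the lane directly. A minimal consumer sketch, not part of
  this patch (the node name and unit address are illustrative):

	pcie0: pci@20000000 {
		phys = <&pcie_phy0>;
		phy-names = "pciephy";
	};
]
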
+++ /dev/null
-From 08db360111814e0d3316ac6b0c437164c45c1275 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 26 Aug 2021 11:49:14 -0700
-Subject: clk: qcom: gcc-sdm660: Move parent tables after PLLs
-
-From: Stephen Boyd <sboyd@kernel.org>
-
-[ Upstream commit a61ca021fe28ab7163ca879fc3532c3cca25063c ]
-
-In the next patch we're going to change these tables to reference the
-PLL structures directly. Let's move them here so the diff is easier to
-read. No functional change in this patch.
-
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
-Stable-dep-of: 6956c18f4ad9 ("clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 204 +++++++++++++++++-----------------
- 1 file changed, 102 insertions(+), 102 deletions(-)
-
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index 31258795e7b8..aac1d4024ab7 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -37,108 +37,6 @@ enum {
- P_GPLL1_EARLY_DIV,
- };
-
--static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
-- { P_GPLL0_EARLY_DIV, 6 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll0[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0[] = {
-- "xo",
-- "gpll0",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
-- { P_SLEEP_CLK, 5 },
-- { P_GPLL0_EARLY_DIV, 6 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "sleep_clk",
-- "gpll0_early_div",
--};
--
--static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
-- { P_XO, 0 },
-- { P_SLEEP_CLK, 5 },
--};
--
--static const char * const gcc_parent_names_xo_sleep_clk[] = {
-- "xo",
-- "sleep_clk",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll4[] = {
-- { P_XO, 0 },
-- { P_GPLL4, 5 },
--};
--
--static const char * const gcc_parent_names_xo_gpll4[] = {
-- "xo",
-- "gpll4",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
-- { P_GPLL0_EARLY_DIV, 3 },
-- { P_GPLL1, 4 },
-- { P_GPLL4, 5 },
-- { P_GPLL1_EARLY_DIV, 6 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
-- "gpll1",
-- "gpll4",
-- "gpll1_early_div",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
-- { P_GPLL4, 5 },
-- { P_GPLL0_EARLY_DIV, 6 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll4",
-- "gpll0_early_div",
--};
--
--static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
-- { P_XO, 0 },
-- { P_GPLL0, 1 },
-- { P_GPLL0_EARLY_DIV, 2 },
-- { P_GPLL4, 5 },
--};
--
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
-- "gpll4",
--};
--
- static struct clk_fixed_factor xo = {
- .mult = 1,
- .div = 1,
-@@ -251,6 +149,108 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- },
- };
-
-+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+ { P_GPLL0_EARLY_DIV, 6 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = {
-+ "xo",
-+ "gpll0",
-+ "gpll0_early_div",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll0[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0[] = {
-+ "xo",
-+ "gpll0",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+ { P_SLEEP_CLK, 5 },
-+ { P_GPLL0_EARLY_DIV, 6 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-+ "xo",
-+ "gpll0",
-+ "sleep_clk",
-+ "gpll0_early_div",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
-+ { P_XO, 0 },
-+ { P_SLEEP_CLK, 5 },
-+};
-+
-+static const char * const gcc_parent_names_xo_sleep_clk[] = {
-+ "xo",
-+ "sleep_clk",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll4[] = {
-+ { P_XO, 0 },
-+ { P_GPLL4, 5 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll4[] = {
-+ "xo",
-+ "gpll4",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+ { P_GPLL0_EARLY_DIV, 3 },
-+ { P_GPLL1, 4 },
-+ { P_GPLL4, 5 },
-+ { P_GPLL1_EARLY_DIV, 6 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-+ "xo",
-+ "gpll0",
-+ "gpll0_early_div",
-+ "gpll1",
-+ "gpll4",
-+ "gpll1_early_div",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+ { P_GPLL4, 5 },
-+ { P_GPLL0_EARLY_DIV, 6 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = {
-+ "xo",
-+ "gpll0",
-+ "gpll4",
-+ "gpll0_early_div",
-+};
-+
-+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
-+ { P_XO, 0 },
-+ { P_GPLL0, 1 },
-+ { P_GPLL0_EARLY_DIV, 2 },
-+ { P_GPLL4, 5 },
-+};
-+
-+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = {
-+ "xo",
-+ "gpll0",
-+ "gpll0_early_div",
-+ "gpll4",
-+};
-+
- static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(50000000, P_GPLL0, 12, 0, 0),
---
-2.35.1
-
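[ Note: the reordering matters because the next patch makes these tables
  take the address of the PLL objects, and C requires a file-scope
  object to be declared before its address is taken. A standalone sketch
  of the constraint, not kernel code:

	struct clk_hw { int id; };

	static struct clk_hw gpll0_hw;	/* stands in for gpll0.clkr.hw */

	/* Compiles only because gpll0_hw is already in scope; if this
	 * table preceded the definition, the compiler would report
	 * 'gpll0_hw' undeclared. */
	static const struct clk_hw *parent_hws[] = {
		&gpll0_hw,
	};
]
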
+++ /dev/null
-From 7b567a29852bc2e5980324cb784174c90c39e56a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 25 Aug 2021 13:45:17 -0700
-Subject: clk: qcom: gcc-sdm660: Replace usage of parent_names
-
-From: Bjorn Andersson <bjorn.andersson@linaro.org>
-
-[ Upstream commit da09577ab562e2700f0aba3f17cc741717ca9e38 ]
-
-Using parent_data and parent_hws instead of parent_names protects
-against some cases of incompletely defined clock trees. While the bug
-being chased this time turned out to be entirely unrelated, this patch
-converts the SDM660 GCC driver to avoid such issues.
-
-The "xo" fixed_factor clock is unused within the gcc driver, but
-referenced from the DSI PHY. So it's left in place until the DSI driver
-is updated.
-
-Tested-by: Marijn Suijten <marijn.suijten@somainline.org>
-Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
-Reviewed-by: Marijn Suijten <marijn.suijten@somainline.org>
-Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
-Link: https://lore.kernel.org/r/20210825204517.1278130-1-bjorn.andersson@linaro.org
-[sboyd@kernel.org: Reduce diff by moving enum and tables back to
-original position in previous patch]
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
-Stable-dep-of: 6956c18f4ad9 ("clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 370 ++++++++++++++++++----------------
- 1 file changed, 194 insertions(+), 176 deletions(-)
-
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index aac1d4024ab7..b8e59b4f0bde 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -42,7 +42,9 @@ static struct clk_fixed_factor xo = {
- .div = 1,
- .hw.init = &(struct clk_init_data){
- .name = "xo",
-- .parent_names = (const char *[]){ "xo_board" },
-+ .parent_data = &(const struct clk_parent_data) {
-+ .fw_name = "xo"
-+ },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-@@ -56,7 +58,9 @@ static struct clk_alpha_pll gpll0_early = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gpll0_early",
-- .parent_names = (const char *[]){ "xo" },
-+ .parent_data = &(const struct clk_parent_data){
-+ .fw_name = "xo",
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_ops,
- },
-@@ -68,7 +72,9 @@ static struct clk_fixed_factor gpll0_early_div = {
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "gpll0_early_div",
-- .parent_names = (const char *[]){ "gpll0_early" },
-+ .parent_hws = (const struct clk_hw*[]){
-+ &gpll0_early.clkr.hw,
-+ },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-@@ -79,7 +85,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll0",
-- .parent_names = (const char *[]){ "gpll0_early" },
-+ .parent_hws = (const struct clk_hw*[]){
-+ &gpll0_early.clkr.hw,
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
- },
-@@ -93,7 +101,9 @@ static struct clk_alpha_pll gpll1_early = {
- .enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data){
- .name = "gpll1_early",
-- .parent_names = (const char *[]){ "xo" },
-+ .parent_data = &(const struct clk_parent_data){
-+ .fw_name = "xo",
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_ops,
- },
-@@ -105,7 +115,9 @@ static struct clk_fixed_factor gpll1_early_div = {
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "gpll1_early_div",
-- .parent_names = (const char *[]){ "gpll1_early" },
-+ .parent_hws = (const struct clk_hw*[]){
-+ &gpll1_early.clkr.hw,
-+ },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-@@ -116,7 +128,9 @@ static struct clk_alpha_pll_postdiv gpll1 = {
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll1",
-- .parent_names = (const char *[]){ "gpll1_early" },
-+ .parent_hws = (const struct clk_hw*[]){
-+ &gpll1_early.clkr.hw,
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
- },
-@@ -130,7 +144,9 @@ static struct clk_alpha_pll gpll4_early = {
- .enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data){
- .name = "gpll4_early",
-- .parent_names = (const char *[]){ "xo" },
-+ .parent_data = &(const struct clk_parent_data){
-+ .fw_name = "xo",
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_ops,
- },
-@@ -143,7 +159,9 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- .clkr.hw.init = &(struct clk_init_data)
- {
- .name = "gpll4",
-- .parent_names = (const char *[]) { "gpll4_early" },
-+ .parent_hws = (const struct clk_hw*[]){
-+ &gpll4_early.clkr.hw,
-+ },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
- },
-@@ -155,10 +173,10 @@ static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
- { P_GPLL0_EARLY_DIV, 6 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
-+ { .hw = &gpll0_early_div.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll0[] = {
-@@ -166,9 +184,9 @@ static const struct parent_map gcc_parent_map_xo_gpll0[] = {
- { P_GPLL0, 1 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0[] = {
-- "xo",
-- "gpll0",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-@@ -178,11 +196,11 @@ static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div
- { P_GPLL0_EARLY_DIV, 6 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "sleep_clk",
-- "gpll0_early_div",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
-+ { .fw_name = "sleep_clk" },
-+ { .hw = &gpll0_early_div.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
-@@ -190,9 +208,9 @@ static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
- { P_SLEEP_CLK, 5 },
- };
-
--static const char * const gcc_parent_names_xo_sleep_clk[] = {
-- "xo",
-- "sleep_clk",
-+static const struct clk_parent_data gcc_parent_data_xo_sleep_clk[] = {
-+ { .fw_name = "xo" },
-+ { .fw_name = "sleep_clk" },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll4[] = {
-@@ -200,9 +218,9 @@ static const struct parent_map gcc_parent_map_xo_gpll4[] = {
- { P_GPLL4, 5 },
- };
-
--static const char * const gcc_parent_names_xo_gpll4[] = {
-- "xo",
-- "gpll4",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll4[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll4.clkr.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-@@ -214,13 +232,13 @@ static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpl
- { P_GPLL1_EARLY_DIV, 6 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
-- "gpll1",
-- "gpll4",
-- "gpll1_early_div",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
-+ { .hw = &gpll0_early_div.hw },
-+ { .hw = &gpll1.clkr.hw },
-+ { .hw = &gpll4.clkr.hw },
-+ { .hw = &gpll1_early_div.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
-@@ -230,11 +248,11 @@ static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] =
- { P_GPLL0_EARLY_DIV, 6 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = {
-- "xo",
-- "gpll0",
-- "gpll4",
-- "gpll0_early_div",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
-+ { .hw = &gpll4.clkr.hw },
-+ { .hw = &gpll0_early_div.hw },
- };
-
- static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
-@@ -244,11 +262,11 @@ static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] =
- { P_GPLL4, 5 },
- };
-
--static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = {
-- "xo",
-- "gpll0",
-- "gpll0_early_div",
-- "gpll4",
-+static const struct clk_parent_data gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4[] = {
-+ { .fw_name = "xo" },
-+ { .hw = &gpll0.clkr.hw },
-+ { .hw = &gpll0_early_div.hw },
-+ { .hw = &gpll4.clkr.hw },
- };
-
- static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
-@@ -265,7 +283,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -290,7 +308,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -304,7 +322,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -318,7 +336,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -332,7 +350,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -346,7 +364,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -360,7 +378,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -374,7 +392,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -407,7 +425,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart1_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -421,7 +439,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart2_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -435,7 +453,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -449,7 +467,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -463,7 +481,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -477,7 +495,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -491,7 +509,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -505,7 +523,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -519,7 +537,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_i2c_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -533,7 +551,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_spi_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -547,7 +565,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart1_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -561,7 +579,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
- .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart2_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -582,7 +600,7 @@ static struct clk_rcg2 gp1_clk_src = {
- .freq_tbl = ftbl_gp1_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp1_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
- .ops = &clk_rcg2_ops,
- },
-@@ -596,7 +614,7 @@ static struct clk_rcg2 gp2_clk_src = {
- .freq_tbl = ftbl_gp1_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp2_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
- .ops = &clk_rcg2_ops,
- },
-@@ -610,7 +628,7 @@ static struct clk_rcg2 gp3_clk_src = {
- .freq_tbl = ftbl_gp1_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp3_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
- .ops = &clk_rcg2_ops,
- },
-@@ -630,7 +648,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
- .freq_tbl = ftbl_hmss_gpll0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll0_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -651,7 +669,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
- .freq_tbl = ftbl_hmss_gpll4_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll4_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll4,
-+ .parent_data = gcc_parent_data_xo_gpll4,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-@@ -670,7 +688,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
- .freq_tbl = ftbl_hmss_rbcpr_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_rbcpr_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0,
-+ .parent_data = gcc_parent_data_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-@@ -689,7 +707,7 @@ static struct clk_rcg2 pdm2_clk_src = {
- .freq_tbl = ftbl_pdm2_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "pdm2_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -711,7 +729,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
- .freq_tbl = ftbl_qspi_ser_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "qspi_ser_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
- .num_parents = 6,
- .ops = &clk_rcg2_ops,
- },
-@@ -737,7 +755,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
- .freq_tbl = ftbl_sdcc1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
- .num_parents = 4,
- .ops = &clk_rcg2_ops,
- },
-@@ -759,7 +777,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
- .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_ice_core_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -785,7 +803,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
- .freq_tbl = ftbl_sdcc2_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc2_apps_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
- .num_parents = 4,
- .ops = &clk_rcg2_floor_ops,
- },
-@@ -808,7 +826,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
- .freq_tbl = ftbl_ufs_axi_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_axi_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -829,7 +847,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
- .freq_tbl = ftbl_ufs_ice_core_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_ice_core_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -843,7 +861,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
- .freq_tbl = ftbl_hmss_rbcpr_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_phy_aux_clk_src",
-- .parent_names = gcc_parent_names_xo_sleep_clk,
-+ .parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-@@ -864,7 +882,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
- .freq_tbl = ftbl_ufs_unipro_core_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_unipro_core_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -885,7 +903,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
- .freq_tbl = ftbl_usb20_master_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_master_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -905,7 +923,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
- .freq_tbl = ftbl_usb20_mock_utmi_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_mock_utmi_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -930,7 +948,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
- .freq_tbl = ftbl_usb30_master_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_master_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -951,7 +969,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
- .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_mock_utmi_clk_src",
-- .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
-+ .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- },
-@@ -971,7 +989,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
- .freq_tbl = ftbl_usb3_phy_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb3_phy_aux_clk_src",
-- .parent_names = gcc_parent_names_xo_sleep_clk,
-+ .parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-@@ -985,8 +1003,8 @@ static struct clk_branch gcc_aggre2_ufs_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre2_ufs_axi_clk",
-- .parent_names = (const char *[]){
-- "ufs_axi_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &ufs_axi_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1002,8 +1020,8 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre2_usb3_axi_clk",
-- .parent_names = (const char *[]){
-- "usb30_master_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb30_master_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1071,8 +1089,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup1_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup1_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1089,8 +1107,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup1_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup1_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1107,8 +1125,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup2_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup2_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1125,8 +1143,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup2_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup2_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1143,8 +1161,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup3_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup3_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1161,8 +1179,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup3_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup3_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1179,8 +1197,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup4_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup4_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1197,8 +1215,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_qup4_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_qup4_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1215,8 +1233,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_uart1_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_uart1_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_uart1_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1233,8 +1251,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_uart2_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp1_uart2_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp1_uart2_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1264,8 +1282,8 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup1_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup1_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1282,8 +1300,8 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup1_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup1_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1300,8 +1318,8 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup2_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup2_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1318,8 +1336,8 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup2_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup2_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1336,8 +1354,8 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup3_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup3_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1354,8 +1372,8 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup3_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup3_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1372,8 +1390,8 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup4_i2c_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup4_i2c_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1390,8 +1408,8 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_qup4_spi_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_qup4_spi_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1408,8 +1426,8 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_uart1_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_uart1_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_uart1_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1426,8 +1444,8 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_uart2_apps_clk",
-- .parent_names = (const char *[]){
-- "blsp2_uart2_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &blsp2_uart2_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1457,8 +1475,8 @@ static struct clk_branch gcc_cfg_noc_usb2_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cfg_noc_usb2_axi_clk",
-- .parent_names = (const char *[]){
-- "usb20_master_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb20_master_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1474,8 +1492,8 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cfg_noc_usb3_axi_clk",
-- .parent_names = (const char *[]){
-- "usb30_master_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb30_master_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1503,8 +1521,8 @@ static struct clk_branch gcc_gp1_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_gp1_clk",
-- .parent_names = (const char *[]){
-- "gp1_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gp1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1521,8 +1539,8 @@ static struct clk_branch gcc_gp2_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_gp2_clk",
-- .parent_names = (const char *[]){
-- "gp2_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gp2_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1539,8 +1557,8 @@ static struct clk_branch gcc_gp3_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_gp3_clk",
-- .parent_names = (const char *[]){
-- "gp3_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gp3_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1583,8 +1601,8 @@ static struct clk_branch gcc_gpu_gpll0_clk = {
- .enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_gpu_gpll0_clk",
-- .parent_names = (const char *[]){
-- "gpll0",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gpll0.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1600,8 +1618,8 @@ static struct clk_branch gcc_gpu_gpll0_div_clk = {
- .enable_mask = BIT(3),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_gpu_gpll0_div_clk",
-- .parent_names = (const char *[]){
-- "gpll0_early_div",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gpll0_early_div.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1631,8 +1649,8 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_hmss_rbcpr_clk",
-- .parent_names = (const char *[]){
-- "hmss_rbcpr_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &hmss_rbcpr_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1649,8 +1667,8 @@ static struct clk_branch gcc_mmss_gpll0_clk = {
- .enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mmss_gpll0_clk",
-- .parent_names = (const char *[]){
-- "gpll0",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gpll0.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1666,8 +1684,8 @@ static struct clk_branch gcc_mmss_gpll0_div_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mmss_gpll0_div_clk",
-- .parent_names = (const char *[]){
-- "gpll0_early_div",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &gpll0_early_div.hw,
- },
- .num_parents = 1,
- .ops = &clk_branch2_ops,
-@@ -1760,8 +1778,8 @@ static struct clk_branch gcc_pdm2_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pdm2_clk",
-- .parent_names = (const char *[]){
-- "pdm2_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &pdm2_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1817,8 +1835,8 @@ static struct clk_branch gcc_qspi_ser_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qspi_ser_clk",
-- .parent_names = (const char *[]){
-- "qspi_ser_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &qspi_ser_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1874,8 +1892,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc1_apps_clk",
-- .parent_names = (const char *[]){
-- "sdcc1_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &sdcc1_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1892,8 +1910,8 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc1_ice_core_clk",
-- .parent_names = (const char *[]){
-- "sdcc1_ice_core_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &sdcc1_ice_core_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1923,8 +1941,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc2_apps_clk",
-- .parent_names = (const char *[]){
-- "sdcc2_apps_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &sdcc2_apps_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1954,8 +1972,8 @@ static struct clk_branch gcc_ufs_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_ufs_axi_clk",
-- .parent_names = (const char *[]){
-- "ufs_axi_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &ufs_axi_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -1985,8 +2003,8 @@ static struct clk_branch gcc_ufs_ice_core_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_ufs_ice_core_clk",
-- .parent_names = (const char *[]){
-- "ufs_ice_core_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &ufs_ice_core_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -2003,8 +2021,8 @@ static struct clk_branch gcc_ufs_phy_aux_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_ufs_phy_aux_clk",
-- .parent_names = (const char *[]){
-- "ufs_phy_aux_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &ufs_phy_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -2060,8 +2078,8 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_ufs_unipro_core_clk",
-- .parent_names = (const char *[]){
-- "ufs_unipro_core_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &ufs_unipro_core_clk_src.clkr.hw,
- },
- .flags = CLK_SET_RATE_PARENT,
- .num_parents = 1,
-@@ -2078,8 +2096,8 @@ static struct clk_branch gcc_usb20_master_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb20_master_clk",
-- .parent_names = (const char *[]){
-- "usb20_master_clk_src"
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb20_master_clk_src.clkr.hw,
- },
- .flags = CLK_SET_RATE_PARENT,
- .num_parents = 1,
-@@ -2096,8 +2114,8 @@ static struct clk_branch gcc_usb20_mock_utmi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb20_mock_utmi_clk",
-- .parent_names = (const char *[]){
-- "usb20_mock_utmi_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb20_mock_utmi_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -2127,8 +2145,8 @@ static struct clk_branch gcc_usb30_master_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb30_master_clk",
-- .parent_names = (const char *[]){
-- "usb30_master_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb30_master_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -2145,8 +2163,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb30_mock_utmi_clk",
-- .parent_names = (const char *[]){
-- "usb30_mock_utmi_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb30_mock_utmi_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
-@@ -2189,8 +2207,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb3_phy_aux_clk",
-- .parent_names = (const char *[]){
-- "usb3_phy_aux_clk_src",
-+ .parent_hws = (const struct clk_hw*[]) {
-+ &usb3_phy_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
---
-2.35.1
-
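[ Note: .fw_name entries are resolved through the consumer's own DT
  node, so the gcc node must name those clocks. A sketch of the DT side,
  assuming the usual SDM660 layout (the label and the rpmcc source are
  illustrative):

	gcc: clock-controller@100000 {
		compatible = "qcom,gcc-sdm660";
		reg = <0x00100000 0x94000>;
		#clock-cells = <1>;
		clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&sleep_clk>;
		clock-names = "xo", "sleep_clk";
	};
]
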
+++ /dev/null
-From 023dc6f51709ee0a0b7d491cd86c580fb7a758cf Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 29 Aug 2021 22:48:19 +0200
-Subject: clk: qcom: gcc-sdm660: Use ARRAY_SIZE for num_parents
-
-From: Marijn Suijten <marijn.suijten@somainline.org>
-
-[ Upstream commit 00ff818888fd436b687dbef457ea5a9135c60b15 ]
-
-Where possible, use ARRAY_SIZE to determine the number of parents in
-clk_parent_data instead of hardcoding a number that relies on an array
-defined hundreds of lines above.
-
-Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
-Link: https://lore.kernel.org/r/20210829204822.289829-2-marijn.suijten@somainline.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
-Stable-dep-of: 6956c18f4ad9 ("clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 80 +++++++++++++++++------------------
- 1 file changed, 40 insertions(+), 40 deletions(-)
-
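[ Note: ARRAY_SIZE keeps num_parents in sync with the table it counts.
  A standalone C analogue of the idiom (the kernel provides the macro
  via <linux/kernel.h>):

	#include <stdio.h>

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	struct clk_parent_data { const char *fw_name; };

	static const struct clk_parent_data parents[] = {
		{ .fw_name = "xo" },
		{ .fw_name = "sleep_clk" },
	};

	int main(void)
	{
		/* Stays correct if entries are added or removed above. */
		printf("num_parents = %zu\n", ARRAY_SIZE(parents));
		return 0;
	}
]
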
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index b8e59b4f0bde..95712cf38bab 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -284,7 +284,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -309,7 +309,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -323,7 +323,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -337,7 +337,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -351,7 +351,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -365,7 +365,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -379,7 +379,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -393,7 +393,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -426,7 +426,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -440,7 +440,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -454,7 +454,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -468,7 +468,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -482,7 +482,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -496,7 +496,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -510,7 +510,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -524,7 +524,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -538,7 +538,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -552,7 +552,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -566,7 +566,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -580,7 +580,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -601,7 +601,7 @@ static struct clk_rcg2 gp1_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp1_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -615,7 +615,7 @@ static struct clk_rcg2 gp2_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp2_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -629,7 +629,7 @@ static struct clk_rcg2 gp3_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp3_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -649,7 +649,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll0_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -670,7 +670,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll4_clk_src",
- .parent_data = gcc_parent_data_xo_gpll4,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll4),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -689,7 +689,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_rbcpr_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -708,7 +708,7 @@ static struct clk_rcg2 pdm2_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "pdm2_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -730,7 +730,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "qspi_ser_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
-- .num_parents = 6,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -756,7 +756,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -778,7 +778,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_ice_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -804,7 +804,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4),
- .ops = &clk_rcg2_floor_ops,
- },
- };
-@@ -827,7 +827,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_axi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -848,7 +848,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_ice_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -862,7 +862,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_phy_aux_clk_src",
- .parent_data = gcc_parent_data_xo_sleep_clk,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -883,7 +883,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_unipro_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -904,7 +904,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_master_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -924,7 +924,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_mock_utmi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -949,7 +949,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_master_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -970,7 +970,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_mock_utmi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -990,7 +990,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb3_phy_aux_clk_src",
- .parent_data = gcc_parent_data_xo_sleep_clk,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
- .ops = &clk_rcg2_ops,
- },
- };
---
-2.35.1
-
+++ /dev/null
-From 6a2334df85eec46ed28495ea50e595590668b866 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 14 Jul 2022 22:38:22 +0200
-Subject: clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock
-
-From: Marijn Suijten <marijn.suijten@somainline.org>
-
-[ Upstream commit 6956c18f4ad9200aa945f7ea37d65a05afc49d51 ]
-
-In commit 3f905469c8ce ("clk: qcom: gcc: Use floor ops for SDCC clocks")
-floor ops were applied to SDCC2 only, but flooring is also required on
-the SDCC1 apps clock, which is used by the eMMC card on Sony's Nile
-platform; without it, the typical "Card appears overclocked" warnings
-already observed on many other platforms show up here as well:
-
- mmc0: Card appears overclocked; req 52000000 Hz, actual 100000000 Hz
- mmc0: Card appears overclocked; req 52000000 Hz, actual 100000000 Hz
- mmc0: Card appears overclocked; req 104000000 Hz, actual 192000000 Hz
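-
-The difference is the rounding policy when a requested rate falls
-between two supported rates: floor ops pick the highest supported rate
-that does not exceed the request, rather than rounding up as the log
-above shows. A minimal sketch of the floor policy (illustrative only;
-the helper and table are hypothetical, not the actual clk_rcg2
-implementation):
-
-	/* Pick the greatest supported rate that does not exceed req. */
-	static unsigned long pick_floor_rate(const unsigned long *tbl,
-					     int n, unsigned long req)
-	{
-		unsigned long best = tbl[0];	/* fall back to the lowest rate */
-		int i;
-
-		for (i = 0; i < n; i++)
-			if (tbl[i] <= req && tbl[i] > best)
-				best = tbl[i];
-
-		/* e.g. req 52 MHz picks 50 MHz rather than 100 MHz */
-		return best;
-	}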
-
-Fixes: f2a76a2955c0 ("clk: qcom: Add Global Clock controller (GCC) driver for SDM660")
-Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
-Tested-by: Alexey Minnekhanov <alexeymin@postmarketos.org>
-Reviewed-by: Stephen Boyd <sboyd@kernel.org>
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
-Link: https://lore.kernel.org/r/20220714203822.186448-1-marijn.suijten@somainline.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index 95712cf38bab..905705559f59 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -757,7 +757,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
- .name = "sdcc1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
- .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
-- .ops = &clk_rcg2_ops,
-+ .ops = &clk_rcg2_floor_ops,
- },
- };
-
---
-2.35.1
-
+++ /dev/null
-From d05bdb66e941d57a57168e4eb9f769add88a7afd Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 1 Apr 2021 17:38:14 +0300
-Subject: KVM: x86: pending exceptions must not be blocked by an injected event
-
-From: Maxim Levitsky <mlevitsk@redhat.com>
-
-[ Upstream commit 4020da3b9f0c7e403b654c43da989f8c0bb05b57 ]
-
-Injected interrupts/NMIs should not block a pending exception, but
-rather either be lost if the nested hypervisor doesn't intercept the
-pending exception (as on stock x86), or be delivered in the
-exitintinfo/IDT_VECTORING_INFO field, as part of a VMexit that
-corresponds to the pending exception.
-
-The only reason for an exception to be blocked is a pending nested
-run (and that can't really happen currently, but it is still worth
-checking for).
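-
-Conceptually, each vendor's check_nested_events() changes as sketched
-below (the real hunks follow; block_nested_events also covers pending
-event reinjection, which must not block the exception):
-
-	if (vcpu->arch.exception.pending) {
-		/* Only a pending nested run may block the exception. */
-		if (nested_run_pending)
-			return -EBUSY;
-		...
-	}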
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Message-Id: <20210401143817.1030695-2-mlevitsk@redhat.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Stable-dep-of: 8d178f460772 ("KVM: nVMX: Treat General Detect #DB (DR7.GD=1) as fault-like")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kvm/svm/nested.c | 8 +++++++-
- arch/x86/kvm/vmx/nested.c | 10 ++++++++--
- 2 files changed, 15 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
-index e7feaa7910ab..5277edd4825f 100644
---- a/arch/x86/kvm/svm/nested.c
-+++ b/arch/x86/kvm/svm/nested.c
-@@ -1025,7 +1025,13 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
- }
-
- if (vcpu->arch.exception.pending) {
-- if (block_nested_events)
-+ /*
-+ * Only a pending nested run can block a pending exception.
-+ * Otherwise an injected NMI/interrupt should either be
-+ * lost or delivered to the nested hypervisor in the EXITINTINFO
-+ * vmcb field, while delivering the pending exception.
-+ */
-+ if (svm->nested.nested_run_pending)
- return -EBUSY;
- if (!nested_exit_on_exception(svm))
- return 0;
-diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
-index 7f15e2b2a0d6..313ace5dc75b 100644
---- a/arch/x86/kvm/vmx/nested.c
-+++ b/arch/x86/kvm/vmx/nested.c
-@@ -3862,9 +3862,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
-
- /*
- * Process any exceptions that are not debug traps before MTF.
-+ *
-+ * Note that only a pending nested run can block a pending exception.
-+ * Otherwise an injected NMI/interrupt should either be
-+ * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
-+ * while delivering the pending exception.
- */
-+
- if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
-- if (block_nested_events)
-+ if (vmx->nested.nested_run_pending)
- return -EBUSY;
- if (!nested_vmx_check_exception(vcpu, &exit_qual))
- goto no_vmexit;
-@@ -3881,7 +3887,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
- }
-
- if (vcpu->arch.exception.pending) {
-- if (block_nested_events)
-+ if (vmx->nested.nested_run_pending)
- return -EBUSY;
- if (!nested_vmx_check_exception(vcpu, &exit_qual))
- goto no_vmexit;
---
-2.35.1
-
+++ /dev/null
-From 9440a65e8e13ed8cb353b44182ce6c51d154e08a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 6 May 2022 11:12:59 -0700
-Subject: net: wwan: t7xx: Add control DMA interface
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Haijun Liu <haijun.liu@mediatek.com>
-
-[ Upstream commit 39d439047f1dc88f98b755d6f3a53a4ef8f0de21 ]
-
-The Cross Layer DMA (CLDMA) hardware interface (HIF) enables the
-control path of host-modem data transfers. The CLDMA HIF layer
-provides a common interface to the Port Layer.
-
-CLDMA manages 8 independent RX/TX physical channels with data flow
-control in HW queues. CLDMA uses ring buffers of General Packet
-Descriptors (GPD) for TX/RX. GPDs can represent multiple or single
-data buffers (DB).
-
-CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA
-interrupts, and initializes CLDMA HW registers.
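-
-For illustration, a conceptual GPD layout (field names simplified and
-hypothetical; the real descriptor is defined in t7xx_hif_cldma.h
-below):
-
-	struct gpd_sketch {
-		u8  flags;     /* HWO: HW owns the GPD; IOC: IRQ on completion */
-		u64 next_gpd;  /* DMA address of the next GPD, forming a ring */
-		u64 data_buff; /* DMA address of the data buffer (DB) */
-		u16 data_len;  /* valid bytes in the buffer */
-	};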
-
-CLDMA TX flow:
-1. Port Layer write
-2. Get DB address
-3. Configure GPD
-4. Trigger processing via HW register write
-
-CLDMA RX flow:
-1. CLDMA HW sends an RX "done" to the host
-2. Driver starts a thread to safely read the GPD
-3. DB is sent to the Port Layer
-4. Create a new buffer for the GPD ring
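-
-These steps map roughly onto the functions added by this patch
-(call-flow sketch):
-
-	TX: t7xx_cldma_send_skb()
-	      t7xx_cldma_gpd_handle_tx_request()  /* map DB, fill GPD */
-	      t7xx_cldma_hw_start_send()          /* kick HW via register */
-	RX: t7xx_cldma_rx_done() work item
-	      t7xx_cldma_gpd_rx_collect()
-	        t7xx_cldma_gpd_rx_from_q()        /* read GPDs, refill ring */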
-
-Note: This patch does not enable compilation since it has dependencies
-such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and
-struct t7xx_pci_dev, which are added by the core patch.
-
-Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
-Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
-Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
-Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
-Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
-Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Stable-dep-of: 2ac6cdd581f4 ("drm/dp_mst: fix drm_dp_dpcd_read return value checks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/wwan/t7xx/t7xx_cldma.c | 281 ++++++
- drivers/net/wwan/t7xx/t7xx_cldma.h | 180 ++++
- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1192 ++++++++++++++++++++++++
- drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 126 +++
- drivers/net/wwan/t7xx/t7xx_reg.h | 33 +
- 5 files changed, 1812 insertions(+)
- create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.c
- create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.h
- create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.c
- create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.h
- create mode 100644 drivers/net/wwan/t7xx/t7xx_reg.h
-
-diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
-new file mode 100644
-index 000000000000..9f43f256db1d
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
-@@ -0,0 +1,281 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#include <linux/bits.h>
-+#include <linux/delay.h>
-+#include <linux/io.h>
-+#include <linux/io-64-nonatomic-lo-hi.h>
-+#include <linux/types.h>
-+
-+#include "t7xx_cldma.h"
-+
-+#define ADDR_SIZE 8
-+
-+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 val;
-+
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
-+ val |= IP_BUSY_WAKEUP;
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
-+}
-+
-+/**
-+ * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
-+ * @hw_info: Pointer to struct t7xx_cldma_hw.
-+ *
-+ * Restore HW after resume. Writes uplink configuration for CLDMA HW.
-+ */
-+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 ul_cfg;
-+
-+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
-+
-+ if (hw_info->hw_mode == MODE_BIT_64)
-+ ul_cfg |= UL_CFG_BIT_MODE_64;
-+ else if (hw_info->hw_mode == MODE_BIT_40)
-+ ul_cfg |= UL_CFG_BIT_MODE_40;
-+ else if (hw_info->hw_mode == MODE_BIT_36)
-+ ul_cfg |= UL_CFG_BIT_MODE_36;
-+
-+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ /* Disable TX and RX invalid address check */
-+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
-+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
-+}
-+
-+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
-+{
-+ /* Enable the TX & RX interrupts */
-+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
-+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
-+ /* Enable the empty queue interrupt */
-+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
-+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
-+}
-+
-+void t7xx_cldma_hw_reset(void __iomem *ao_base)
-+{
-+ u32 val;
-+
-+ val = ioread32(ao_base + REG_INFRA_RST2_SET);
-+ val |= RST2_PMIC_SW_RST_SET;
-+ iowrite32(val, ao_base + REG_INFRA_RST2_SET);
-+ val = ioread32(ao_base + REG_INFRA_RST4_SET);
-+ val |= RST4_CLDMA1_SW_RST_SET;
-+ iowrite32(val, ao_base + REG_INFRA_RST4_SET);
-+ udelay(1);
-+
-+ val = ioread32(ao_base + REG_INFRA_RST4_CLR);
-+ val |= RST4_CLDMA1_SW_RST_CLR;
-+ iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
-+ val = ioread32(ao_base + REG_INFRA_RST2_CLR);
-+ val |= RST2_PMIC_SW_RST_CLR;
-+ iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
-+}
-+
-+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
-+{
-+ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
-+
-+ return ioread64(hw_info->ap_pdn_base + offset);
-+}
-+
-+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
-+ enum mtk_txrx tx_rx)
-+{
-+ u32 offset = qno * ADDR_SIZE;
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
-+ iowrite64(address, reg + offset);
-+}
-+
-+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *base = hw_info->ap_pdn_base;
-+
-+ if (tx_rx == MTK_RX)
-+ iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
-+ else
-+ iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
-+}
-+
-+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 mask, val;
-+
-+ mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
-+ val = ioread32(reg);
-+
-+ return val & mask;
-+}
-+
-+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
-+{
-+ unsigned int ch_id;
-+
-+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ ch_id &= bitmask;
-+ /* Clear the ch IDs in the TX interrupt status register */
-+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+}
-+
-+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
-+{
-+ unsigned int ch_id;
-+
-+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ ch_id &= bitmask;
-+ /* Clear the ch IDs in the RX interrupt status register */
-+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+}
-+
-+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
-+ val = ioread32(reg);
-+ return val & bitmask;
-+}
-+
-+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
-+}
-+
-+/**
-+ * t7xx_cldma_hw_init() - Initialize CLDMA HW.
-+ * @hw_info: Pointer to struct t7xx_cldma_hw.
-+ *
-+ * Write uplink and downlink configuration to CLDMA HW.
-+ */
-+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 ul_cfg, dl_cfg;
-+
-+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
-+ /* Configure the DRAM address mode */
-+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
-+ dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
-+
-+ if (hw_info->hw_mode == MODE_BIT_64) {
-+ ul_cfg |= UL_CFG_BIT_MODE_64;
-+ dl_cfg |= DL_CFG_BIT_MODE_64;
-+ } else if (hw_info->hw_mode == MODE_BIT_40) {
-+ ul_cfg |= UL_CFG_BIT_MODE_40;
-+ dl_cfg |= DL_CFG_BIT_MODE_40;
-+ } else if (hw_info->hw_mode == MODE_BIT_36) {
-+ ul_cfg |= UL_CFG_BIT_MODE_36;
-+ dl_cfg |= DL_CFG_BIT_MODE_36;
-+ }
-+
-+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ dl_cfg |= DL_CFG_UP_HW_LAST;
-+ iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
-+ iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
-+ iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
-+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
-+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
-+}
-+
-+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
-+ iowrite32(CLDMA_ALL_Q, reg);
-+}
-+
-+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ iowrite32(TXRX_STATUS_BITMASK, reg);
-+ iowrite32(EMPTY_STATUS_BITMASK, reg);
-+}
-diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h
-new file mode 100644
-index 000000000000..8949e8377fb0
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_cldma.h
-@@ -0,0 +1,180 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#ifndef __T7XX_CLDMA_H__
-+#define __T7XX_CLDMA_H__
-+
-+#include <linux/bits.h>
-+#include <linux/types.h>
-+
-+#define CLDMA_TXQ_NUM 8
-+#define CLDMA_RXQ_NUM 8
-+#define CLDMA_ALL_Q GENMASK(7, 0)
-+
-+/* Interrupt status bits */
-+#define EMPTY_STATUS_BITMASK GENMASK(15, 8)
-+#define TXRX_STATUS_BITMASK GENMASK(7, 0)
-+#define EQ_STA_BIT_OFFSET 8
-+#define L2_INT_BIT_COUNT 16
-+#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK)
-+
-+#define TQ_ERR_INT_BITMASK GENMASK(23, 16)
-+#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
-+
-+#define RQ_ERR_INT_BITMASK GENMASK(23, 16)
-+#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
-+
-+#define CLDMA0_AO_BASE 0x10049000
-+#define CLDMA0_PD_BASE 0x1021d000
-+#define CLDMA1_AO_BASE 0x1004b000
-+#define CLDMA1_PD_BASE 0x1021f000
-+
-+#define CLDMA_R_AO_BASE 0x10023000
-+#define CLDMA_R_PD_BASE 0x1023d000
-+
-+/* CLDMA TX */
-+#define REG_CLDMA_UL_START_ADDRL_0 0x0004
-+#define REG_CLDMA_UL_START_ADDRH_0 0x0008
-+#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044
-+#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048
-+#define REG_CLDMA_UL_STATUS 0x0084
-+#define REG_CLDMA_UL_START_CMD 0x0088
-+#define REG_CLDMA_UL_RESUME_CMD 0x008c
-+#define REG_CLDMA_UL_STOP_CMD 0x0090
-+#define REG_CLDMA_UL_ERROR 0x0094
-+#define REG_CLDMA_UL_CFG 0x0098
-+#define UL_CFG_BIT_MODE_36 BIT(5)
-+#define UL_CFG_BIT_MODE_40 BIT(6)
-+#define UL_CFG_BIT_MODE_64 BIT(7)
-+#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5)
-+
-+#define REG_CLDMA_UL_MEM 0x009c
-+#define UL_MEM_CHECK_DIS BIT(0)
-+
-+/* CLDMA RX */
-+#define REG_CLDMA_DL_START_CMD 0x05bc
-+#define REG_CLDMA_DL_RESUME_CMD 0x05c0
-+#define REG_CLDMA_DL_STOP_CMD 0x05c4
-+#define REG_CLDMA_DL_MEM 0x0508
-+#define DL_MEM_CHECK_DIS BIT(0)
-+
-+#define REG_CLDMA_DL_CFG 0x0404
-+#define DL_CFG_UP_HW_LAST BIT(2)
-+#define DL_CFG_BIT_MODE_36 BIT(10)
-+#define DL_CFG_BIT_MODE_40 BIT(11)
-+#define DL_CFG_BIT_MODE_64 BIT(12)
-+#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10)
-+
-+#define REG_CLDMA_DL_START_ADDRL_0 0x0478
-+#define REG_CLDMA_DL_START_ADDRH_0 0x047c
-+#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8
-+#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc
-+#define REG_CLDMA_DL_STATUS 0x04f8
-+
-+/* CLDMA MISC */
-+#define REG_CLDMA_L2TISAR0 0x0810
-+#define REG_CLDMA_L2TISAR1 0x0814
-+#define REG_CLDMA_L2TIMR0 0x0818
-+#define REG_CLDMA_L2TIMR1 0x081c
-+#define REG_CLDMA_L2TIMCR0 0x0820
-+#define REG_CLDMA_L2TIMCR1 0x0824
-+#define REG_CLDMA_L2TIMSR0 0x0828
-+#define REG_CLDMA_L2TIMSR1 0x082c
-+#define REG_CLDMA_L3TISAR0 0x0830
-+#define REG_CLDMA_L3TISAR1 0x0834
-+#define REG_CLDMA_L2RISAR0 0x0850
-+#define REG_CLDMA_L2RISAR1 0x0854
-+#define REG_CLDMA_L3RISAR0 0x0870
-+#define REG_CLDMA_L3RISAR1 0x0874
-+#define REG_CLDMA_IP_BUSY 0x08b4
-+#define IP_BUSY_WAKEUP BIT(0)
-+#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0)
-+#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0)
-+
-+/* CLDMA MISC */
-+#define REG_CLDMA_L2RIMR0 0x0858
-+#define REG_CLDMA_L2RIMR1 0x085c
-+#define REG_CLDMA_L2RIMCR0 0x0860
-+#define REG_CLDMA_L2RIMCR1 0x0864
-+#define REG_CLDMA_L2RIMSR0 0x0868
-+#define REG_CLDMA_L2RIMSR1 0x086c
-+#define REG_CLDMA_BUSY_MASK 0x0954
-+#define BUSY_MASK_PCIE BIT(0)
-+#define BUSY_MASK_AP BIT(1)
-+#define BUSY_MASK_MD BIT(2)
-+
-+#define REG_CLDMA_INT_MASK 0x0960
-+
-+/* CLDMA RESET */
-+#define REG_INFRA_RST4_SET 0x0730
-+#define RST4_CLDMA1_SW_RST_SET BIT(20)
-+
-+#define REG_INFRA_RST4_CLR 0x0734
-+#define RST4_CLDMA1_SW_RST_CLR BIT(20)
-+
-+#define REG_INFRA_RST2_SET 0x0140
-+#define RST2_PMIC_SW_RST_SET BIT(18)
-+
-+#define REG_INFRA_RST2_CLR 0x0144
-+#define RST2_PMIC_SW_RST_CLR BIT(18)
-+
-+enum mtk_txrx {
-+ MTK_TX,
-+ MTK_RX,
-+};
-+
-+enum t7xx_hw_mode {
-+ MODE_BIT_32,
-+ MODE_BIT_36,
-+ MODE_BIT_40,
-+ MODE_BIT_64,
-+};
-+
-+struct t7xx_cldma_hw {
-+ enum t7xx_hw_mode hw_mode;
-+ void __iomem *ap_ao_base;
-+ void __iomem *ap_pdn_base;
-+ u32 phy_interrupt_id;
-+};
-+
-+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
-+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
-+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
-+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info,
-+ unsigned int qno, u64 address, enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_reset(void __iomem *ao_base);
-+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
-+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info);
-+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
-+#endif
-diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-new file mode 100644
-index 000000000000..c756b1d0b519
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-@@ -0,0 +1,1192 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ *
-+ * Contributors:
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ */
-+
-+#include <linux/bits.h>
-+#include <linux/bitops.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/dmapool.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma-direction.h>
-+#include <linux/gfp.h>
-+#include <linux/io.h>
-+#include <linux/io-64-nonatomic-lo-hi.h>
-+#include <linux/iopoll.h>
-+#include <linux/irqreturn.h>
-+#include <linux/kernel.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <linux/netdevice.h>
-+#include <linux/pci.h>
-+#include <linux/sched.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#include <linux/types.h>
-+#include <linux/wait.h>
-+#include <linux/workqueue.h>
-+
-+#include "t7xx_cldma.h"
-+#include "t7xx_hif_cldma.h"
-+#include "t7xx_mhccif.h"
-+#include "t7xx_pci.h"
-+#include "t7xx_pcie_mac.h"
-+#include "t7xx_reg.h"
-+#include "t7xx_state_monitor.h"
-+
-+#define MAX_TX_BUDGET 16
-+#define MAX_RX_BUDGET 16
-+
-+#define CHECK_Q_STOP_TIMEOUT_US 1000000
-+#define CHECK_Q_STOP_STEP_US 10000
-+
-+#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */
-+
-+static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
-+ enum mtk_txrx tx_rx, unsigned int index)
-+{
-+ queue->dir = tx_rx;
-+ queue->index = index;
-+ queue->md_ctrl = md_ctrl;
-+ queue->tr_ring = NULL;
-+ queue->tr_done = NULL;
-+ queue->tx_next = NULL;
-+}
-+
-+static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
-+ enum mtk_txrx tx_rx, unsigned int index)
-+{
-+ md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
-+ init_waitqueue_head(&queue->req_wq);
-+ spin_lock_init(&queue->ring_lock);
-+}
-+
-+static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
-+{
-+ gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
-+ gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
-+}
-+
-+static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
-+{
-+ gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
-+ gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
-+}
-+
-+static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-+ size_t size)
-+{
-+ req->skb = __dev_alloc_skb(size, GFP_KERNEL);
-+ if (!req->skb)
-+ return -ENOMEM;
-+
-+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
-+ skb_data_area_size(req->skb), DMA_FROM_DEVICE);
-+ if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
-+ dev_kfree_skb_any(req->skb);
-+ req->skb = NULL;
-+ req->mapped_buff = 0;
-+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ unsigned int hwo_polling_count = 0;
-+ struct t7xx_cldma_hw *hw_info;
-+ bool rx_not_done = true;
-+ unsigned long flags;
-+ int count = 0;
-+
-+ hw_info = &md_ctrl->hw_info;
-+
-+ do {
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ struct sk_buff *skb;
-+ int ret;
-+
-+ req = queue->tr_done;
-+ if (!req)
-+ return -ENODATA;
-+
-+ gpd = req->gpd;
-+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
-+ dma_addr_t gpd_addr;
-+
-+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
-+ dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
-+ return -ENODEV;
-+ }
-+
-+ gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
-+ queue->index * sizeof(u64));
-+ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
-+ return 0;
-+
-+ udelay(1);
-+ continue;
-+ }
-+
-+ hwo_polling_count = 0;
-+ skb = req->skb;
-+
-+ if (req->mapped_buff) {
-+ dma_unmap_single(md_ctrl->dev, req->mapped_buff,
-+ skb_data_area_size(skb), DMA_FROM_DEVICE);
-+ req->mapped_buff = 0;
-+ }
-+
-+ skb->len = 0;
-+ skb_reset_tail_pointer(skb);
-+ skb_put(skb, le16_to_cpu(gpd->data_buff_len));
-+
-+ ret = md_ctrl->recv_skb(queue, skb);
-+ /* Break processing, will try again later */
-+ if (ret < 0)
-+ return ret;
-+
-+ req->skb = NULL;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ req = queue->rx_refill;
-+
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
-+ if (ret)
-+ return ret;
-+
-+ gpd = req->gpd;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
-+ gpd->data_buff_len = 0;
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ rx_not_done = ++count < budget || !need_resched();
-+ } while (rx_not_done);
-+
-+ *over_budget = true;
-+ return 0;
-+}
-+
-+static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct t7xx_cldma_hw *hw_info;
-+ unsigned int pending_rx_int;
-+ bool over_budget = false;
-+ unsigned long flags;
-+ int ret;
-+
-+ hw_info = &md_ctrl->hw_info;
-+
-+ do {
-+ ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
-+ if (ret == -ENODATA)
-+ return 0;
-+ else if (ret)
-+ return ret;
-+
-+ pending_rx_int = 0;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->rxq_active & BIT(queue->index)) {
-+ if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
-+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
-+
-+ pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
-+ MTK_RX);
-+ if (pending_rx_int) {
-+ t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
-+
-+ if (over_budget) {
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ return -EAGAIN;
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ } while (pending_rx_int);
-+
-+ return 0;
-+}
-+
-+static void t7xx_cldma_rx_done(struct work_struct *work)
-+{
-+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ int value;
-+
-+ value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
-+ if (value && md_ctrl->rxq_active & BIT(queue->index)) {
-+ queue_work(queue->worker, &queue->cldma_work);
-+ return;
-+ }
-+
-+ t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
-+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
-+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
-+}
-+
-+static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ unsigned int dma_len, count = 0;
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+ dma_addr_t dma_free;
-+ struct sk_buff *skb;
-+
-+ while (!kthread_should_stop()) {
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ req = queue->tr_done;
-+ if (!req) {
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ break;
-+ }
-+ gpd = req->gpd;
-+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ break;
-+ }
-+ queue->budget++;
-+ dma_free = req->mapped_buff;
-+ dma_len = le16_to_cpu(gpd->data_buff_len);
-+ skb = req->skb;
-+ req->skb = NULL;
-+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ count++;
-+ dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
-+ dev_kfree_skb_any(skb);
-+ }
-+
-+ if (count)
-+ wake_up_nr(&queue->req_wq, count);
-+
-+ return count;
-+}
-+
-+static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct cldma_request *req;
-+ dma_addr_t ul_curr_addr;
-+ unsigned long flags;
-+ bool pending_gpd;
-+
-+ if (!(md_ctrl->txq_active & BIT(queue->index)))
-+ return;
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (pending_gpd) {
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+
-+ /* Check current processing TGPD, 64-bit address is in a table by Q index */
-+ ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
-+ queue->index * sizeof(u64));
-+ if (req->gpd_addr != ul_curr_addr) {
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
-+ md_ctrl->hif_id, queue->index);
-+ return;
-+ }
-+
-+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_tx_done(struct work_struct *work)
-+{
-+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct t7xx_cldma_hw *hw_info;
-+ unsigned int l2_tx_int;
-+ unsigned long flags;
-+
-+ hw_info = &md_ctrl->hw_info;
-+ t7xx_cldma_gpd_tx_collect(queue);
-+ l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
-+ MTK_TX);
-+ if (l2_tx_int & EQ_STA_BIT(queue->index)) {
-+ t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
-+ t7xx_cldma_txq_empty_hndl(queue);
-+ }
-+
-+ if (l2_tx_int & BIT(queue->index)) {
-+ t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
-+ queue_work(queue->worker, &queue->cldma_work);
-+ return;
-+ }
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->txq_active & BIT(queue->index)) {
-+ t7xx_cldma_clear_ip_busy(hw_info);
-+ t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
-+ t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
-+ struct cldma_ring *ring, enum dma_data_direction tx_rx)
-+{
-+ struct cldma_request *req_cur, *req_next;
-+
-+ list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
-+ if (req_cur->mapped_buff && req_cur->skb) {
-+ dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
-+ skb_data_area_size(req_cur->skb), tx_rx);
-+ req_cur->mapped_buff = 0;
-+ }
-+
-+ dev_kfree_skb_any(req_cur->skb);
-+
-+ if (req_cur->gpd)
-+ dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
-+
-+ list_del(&req_cur->entry);
-+ kfree(req_cur);
-+ }
-+}
-+
-+static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
-+{
-+ struct cldma_request *req;
-+ int val;
-+
-+ req = kzalloc(sizeof(*req), GFP_KERNEL);
-+ if (!req)
-+ return NULL;
-+
-+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
-+ if (!req->gpd)
-+ goto err_free_req;
-+
-+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
-+ if (val)
-+ goto err_free_pool;
-+
-+ return req;
-+
-+err_free_pool:
-+ dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
-+
-+err_free_req:
-+ kfree(req);
-+
-+ return NULL;
-+}
-+
-+static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
-+{
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ int i;
-+
-+ INIT_LIST_HEAD(&ring->gpd_ring);
-+ ring->length = MAX_RX_BUDGET;
-+
-+ for (i = 0; i < ring->length; i++) {
-+ req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
-+ if (!req) {
-+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ gpd = req->gpd;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
-+ gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+ INIT_LIST_HEAD(&req->entry);
-+ list_add_tail(&req->entry, &ring->gpd_ring);
-+ }
-+
-+ /* Link previous GPD to next GPD, circular */
-+ list_for_each_entry(req, &ring->gpd_ring, entry) {
-+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
-+ gpd = req->gpd;
-+ }
-+
-+ return 0;
-+}
-+
-+static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
-+{
-+ struct cldma_request *req;
-+
-+ req = kzalloc(sizeof(*req), GFP_KERNEL);
-+ if (!req)
-+ return NULL;
-+
-+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
-+ if (!req->gpd) {
-+ kfree(req);
-+ return NULL;
-+ }
-+
-+ return req;
-+}
-+
-+static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
-+{
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ int i;
-+
-+ INIT_LIST_HEAD(&ring->gpd_ring);
-+ ring->length = MAX_TX_BUDGET;
-+
-+ for (i = 0; i < ring->length; i++) {
-+ req = t7xx_alloc_tx_request(md_ctrl);
-+ if (!req) {
-+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ gpd = req->gpd;
-+ gpd->flags = GPD_FLAGS_IOC;
-+ INIT_LIST_HEAD(&req->entry);
-+ list_add_tail(&req->entry, &ring->gpd_ring);
-+ }
-+
-+ /* Link previous GPD to next GPD, circular */
-+ list_for_each_entry(req, &ring->gpd_ring, entry) {
-+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
-+ gpd = req->gpd;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
-+ * @queue: Pointer to the queue structure.
-+ *
-+ * Called with ring_lock (unless called during initialization phase)
-+ */
-+static void t7xx_cldma_q_reset(struct cldma_queue *queue)
-+{
-+ struct cldma_request *req;
-+
-+ req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
-+ queue->tr_done = req;
-+ queue->budget = queue->tr_ring->length;
-+
-+ if (queue->dir == MTK_TX)
-+ queue->tx_next = req;
-+ else
-+ queue->rx_refill = req;
-+}
-+
-+static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+
-+ queue->dir = MTK_RX;
-+ queue->tr_ring = &md_ctrl->rx_ring[queue->index];
-+ t7xx_cldma_q_reset(queue);
-+}
-+
-+static void t7xx_cldma_txq_init(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+
-+ queue->dir = MTK_TX;
-+ queue->tr_ring = &md_ctrl->tx_ring[queue->index];
-+ t7xx_cldma_q_reset(queue);
-+}
-+
-+static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
-+}
-+
-+static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
-+}
-+
-+static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ /* L2 raw interrupt status */
-+ l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
-+ l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
-+ l2_tx_int &= ~l2_tx_int_msk;
-+ l2_rx_int &= ~l2_rx_int_msk;
-+
-+ if (l2_tx_int) {
-+ if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
-+ /* Read and clear L3 TX interrupt status */
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
-+ }
-+
-+ t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
-+ if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
-+ for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
-+ if (i < CLDMA_TXQ_NUM) {
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
-+ queue_work(md_ctrl->txq[i].worker,
-+ &md_ctrl->txq[i].cldma_work);
-+ } else {
-+ t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
-+ }
-+ }
-+ }
-+ }
-+
-+ if (l2_rx_int) {
-+ if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
-+ /* Read and clear L3 RX interrupt status */
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
-+ }
-+
-+ t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
-+ if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
-+ l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
-+ for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
-+ queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
-+ }
-+ }
-+ }
-+}
-+
-+static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned int tx_active;
-+ unsigned int rx_active;
-+
-+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
-+ return false;
-+
-+ tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
-+ rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
-+
-+ return tx_active || rx_active;
-+}
-+
-+/**
-+ * t7xx_cldma_stop() - Stop CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Stop TX and RX queues. Disable L1 and L2 interrupts.
-+ * Clear status registers.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ERROR - Error code from polling cldma_queues_active.
-+ */
-+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ bool active;
-+ int i, ret;
-+
-+ md_ctrl->rxq_active = 0;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
-+ md_ctrl->txq_active = 0;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
-+ md_ctrl->txq_started = 0;
-+ t7xx_cldma_disable_irq(md_ctrl);
-+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
-+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
-+ t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
-+ t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
-+
-+ if (md_ctrl->is_late_init) {
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ flush_work(&md_ctrl->txq[i].cldma_work);
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ flush_work(&md_ctrl->rxq[i].cldma_work);
-+ }
-+
-+ ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
-+ CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
-+ if (ret)
-+ dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
-+
-+ return ret;
-+}
-+
-+static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
-+{
-+ int i;
-+
-+ if (!md_ctrl->is_late_init)
-+ return;
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
-+
-+ dma_pool_destroy(md_ctrl->gpd_dmapool);
-+ md_ctrl->gpd_dmapool = NULL;
-+ md_ctrl->is_late_init = false;
-+}
-+
-+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long flags;
-+ int i;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_ctrl->txq_active = 0;
-+ md_ctrl->rxq_active = 0;
-+ t7xx_cldma_disable_irq(md_ctrl);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ cancel_work_sync(&md_ctrl->txq[i].cldma_work);
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ t7xx_cldma_late_release(md_ctrl);
-+}
-+
-+/**
-+ * t7xx_cldma_start() - Start CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Set TX/RX start address.
-+ * Start all RX queues and enable L2 interrupt.
-+ */
-+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->is_late_init) {
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ t7xx_cldma_enable_irq(md_ctrl);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ if (md_ctrl->txq[i].tr_done)
-+ t7xx_cldma_hw_set_start_addr(hw_info, i,
-+ md_ctrl->txq[i].tr_done->gpd_addr,
-+ MTK_TX);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ if (md_ctrl->rxq[i].tr_done)
-+ t7xx_cldma_hw_set_start_addr(hw_info, i,
-+ md_ctrl->rxq[i].tr_done->gpd_addr,
-+ MTK_RX);
-+ }
-+
-+ /* Enable L2 interrupt */
-+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
-+ t7xx_cldma_hw_start(hw_info);
-+ md_ctrl->txq_started = 0;
-+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
-+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
-+{
-+ struct cldma_queue *txq = &md_ctrl->txq[qnum];
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&txq->ring_lock, flags);
-+ t7xx_cldma_q_reset(txq);
-+ list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
-+ gpd = req->gpd;
-+ gpd->flags &= ~GPD_FLAGS_HWO;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
-+ gpd->data_buff_len = 0;
-+ dev_kfree_skb_any(req->skb);
-+ req->skb = NULL;
-+ }
-+ spin_unlock_irqrestore(&txq->ring_lock, flags);
-+}
-+
-+static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
-+{
-+ struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+ int ret = 0;
-+
-+ spin_lock_irqsave(&rxq->ring_lock, flags);
-+ t7xx_cldma_q_reset(rxq);
-+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
-+ gpd = req->gpd;
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+ gpd->data_buff_len = 0;
-+
-+ if (req->skb) {
-+ req->skb->len = 0;
-+ skb_reset_tail_pointer(req->skb);
-+ }
-+ }
-+
-+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
-+ if (req->skb)
-+ continue;
-+
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
-+ if (ret)
-+ break;
-+
-+ t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
-+ }
-+ spin_unlock_irqrestore(&rxq->ring_lock, flags);
-+
-+ return ret;
-+}
-+
-+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
-+{
-+ int i;
-+
-+ if (tx_rx == MTK_TX) {
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_clear_txq(md_ctrl, i);
-+ } else {
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ t7xx_cldma_clear_rxq(md_ctrl, i);
-+ }
-+}
-+
-+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
-+ if (tx_rx == MTK_RX)
-+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
-+ else
-+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
-+ struct sk_buff *skb)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct cldma_gpd *gpd = tx_req->gpd;
-+ unsigned long flags;
-+
-+ /* Update GPD */
-+ tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
-+
-+ if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
-+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
-+ gpd->data_buff_len = cpu_to_le16(skb->len);
-+
-+ /* This lock must cover TGPD setting, as even without a resume operation,
-+ * CLDMA can send next HWO=1 if last TGPD just finished.
-+ */
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->txq_active & BIT(queue->index))
-+ gpd->flags |= GPD_FLAGS_HWO;
-+
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ tx_req->skb = skb;
-+ return 0;
-+}
-+
-+/* Called with cldma_lock */
-+static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
-+ struct cldma_request *prev_req)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+
-+ /* Check whether the device was powered off (CLDMA start address is not set) */
-+ if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
-+ t7xx_cldma_hw_init(hw_info);
-+ t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
-+ md_ctrl->txq_started &= ~BIT(qno);
-+ }
-+
-+ if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
-+ if (md_ctrl->txq_started & BIT(qno))
-+ t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
-+ else
-+ t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
-+
-+ md_ctrl->txq_started |= BIT(qno);
-+ }
-+}
-+
-+/**
-+ * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
-+ * @md_ctrl: CLDMA context structure.
-+ * @recv_skb: Receiving skb callback.
-+ */
-+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
-+{
-+ md_ctrl->recv_skb = recv_skb;
-+}
-+
-+/**
-+ * t7xx_cldma_send_skb() - Send control data to modem.
-+ * @md_ctrl: CLDMA context structure.
-+ * @qno: Queue number.
-+ * @skb: Socket buffer.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ENOMEM - Allocation failure.
-+ * * -EINVAL - Invalid queue request.
-+ * * -EIO - Queue is not active.
-+ * * -ETIMEDOUT - Timeout waiting for the device to wake up.
-+ */
-+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
-+{
-+ struct cldma_request *tx_req;
-+ struct cldma_queue *queue;
-+ unsigned long flags;
-+ int ret;
-+
-+ if (qno >= CLDMA_TXQ_NUM)
-+ return -EINVAL;
-+
-+ queue = &md_ctrl->txq[qno];
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (!(md_ctrl->txq_active & BIT(qno))) {
-+ ret = -EIO;
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ goto allow_sleep;
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ do {
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ tx_req = queue->tx_next;
-+ if (queue->budget > 0 && !tx_req->skb) {
-+ struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
-+
-+ queue->budget--;
-+ t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
-+ queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ /* Protect the access to the modem for queues operations (resume/start)
-+ * which access shared locations by all the queues.
-+ * cldma_lock is independent of ring_lock which is per queue.
-+ */
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ break;
-+ }
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
-+ } while (!ret);
-+
-+allow_sleep:
-+ return ret;
-+}
-+
-+static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
-+{
-+ char dma_pool_name[32];
-+ int i, j, ret;
-+
-+ if (md_ctrl->is_late_init) {
-+ dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
-+ return -EALREADY;
-+ }
-+
-+ snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
-+
-+ md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
-+ sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
-+ if (!md_ctrl->gpd_dmapool) {
-+ dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
-+ if (ret) {
-+ dev_err(md_ctrl->dev, "control TX ring init fail\n");
-+ goto err_free_tx_ring;
-+ }
-+ }
-+
-+ for (j = 0; j < CLDMA_RXQ_NUM; j++) {
-+ md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-+
-+ if (j == CLDMA_RXQ_NUM - 1)
-+ md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-+
-+ ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
-+ if (ret) {
-+ dev_err(md_ctrl->dev, "Control RX ring init fail\n");
-+ goto err_free_rx_ring;
-+ }
-+ }
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_txq_init(&md_ctrl->txq[i]);
-+
-+ for (j = 0; j < CLDMA_RXQ_NUM; j++)
-+ t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
-+
-+ md_ctrl->is_late_init = true;
-+ return 0;
-+
-+err_free_rx_ring:
-+ while (j--)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
-+
-+err_free_tx_ring:
-+ while (i--)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
-+
-+ return ret;
-+}
-+
-+static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
-+{
-+ return addr + phy_addr - addr_trs1;
-+}
-+
-+static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ u32 phy_ao_base, phy_pd_base;
-+
-+ if (md_ctrl->hif_id != CLDMA_ID_MD)
-+ return;
-+
-+ phy_ao_base = CLDMA1_AO_BASE;
-+ phy_pd_base = CLDMA1_PD_BASE;
-+ hw_info->phy_interrupt_id = CLDMA1_INT;
-+ hw_info->hw_mode = MODE_BIT_64;
-+ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
-+ pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
-+ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
-+ pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
-+}
-+
-+static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
-+{
-+ dev_kfree_skb_any(skb);
-+ return 0;
-+}
-+
-+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
-+{
-+ struct device *dev = &t7xx_dev->pdev->dev;
-+ struct cldma_ctrl *md_ctrl;
-+
-+ md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
-+ if (!md_ctrl)
-+ return -ENOMEM;
-+
-+ md_ctrl->t7xx_dev = t7xx_dev;
-+ md_ctrl->dev = dev;
-+ md_ctrl->hif_id = hif_id;
-+ md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
-+ t7xx_hw_info_init(md_ctrl);
-+ t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
-+ return 0;
-+}
-+
-+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
-+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
-+ t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
-+ t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
-+ t7xx_cldma_hw_init(hw_info);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
-+{
-+ struct cldma_ctrl *md_ctrl = data;
-+ u32 interrupt;
-+
-+ interrupt = md_ctrl->hw_info.phy_interrupt_id;
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
-+ t7xx_cldma_irq_work_cb(md_ctrl);
-+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
-+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
-+ return IRQ_HANDLED;
-+}
-+
-+static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
-+{
-+ int i;
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ if (md_ctrl->txq[i].worker) {
-+ destroy_workqueue(md_ctrl->txq[i].worker);
-+ md_ctrl->txq[i].worker = NULL;
-+ }
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ if (md_ctrl->rxq[i].worker) {
-+ destroy_workqueue(md_ctrl->rxq[i].worker);
-+ md_ctrl->rxq[i].worker = NULL;
-+ }
-+ }
-+}
-+
-+/**
-+ * t7xx_cldma_init() - Initialize CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Initialize HIF TX/RX queue structure.
-+ * Register CLDMA callback ISR with PCIe driver.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ERROR - Error code from failure sub-initializations.
-+ */
-+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ md_ctrl->txq_active = 0;
-+ md_ctrl->rxq_active = 0;
-+ md_ctrl->is_late_init = false;
-+
-+ spin_lock_init(&md_ctrl->cldma_lock);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
-+ md_ctrl->txq[i].worker =
-+ alloc_workqueue("md_hif%d_tx%d_worker",
-+ WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
-+ 1, md_ctrl->hif_id, i);
-+ if (!md_ctrl->txq[i].worker)
-+ goto err_workqueue;
-+
-+ INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
-+ INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
-+
-+ md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
-+ WQ_UNBOUND | WQ_MEM_RECLAIM,
-+ 1, md_ctrl->hif_id, i);
-+ if (!md_ctrl->rxq[i].worker)
-+ goto err_workqueue;
-+ }
-+
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
-+ md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
-+ md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
-+ md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
-+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
-+ return 0;
-+
-+err_workqueue:
-+ t7xx_cldma_destroy_wqs(md_ctrl);
-+ return -ENOMEM;
-+}
-+
-+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_cldma_late_release(md_ctrl);
-+ t7xx_cldma_late_init(md_ctrl);
-+}
-+
-+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_cldma_stop(md_ctrl);
-+ t7xx_cldma_late_release(md_ctrl);
-+ t7xx_cldma_destroy_wqs(md_ctrl);
-+}
-diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
-new file mode 100644
-index 000000000000..deb239e4f803
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
-@@ -0,0 +1,126 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ */
-+
-+#ifndef __T7XX_HIF_CLDMA_H__
-+#define __T7XX_HIF_CLDMA_H__
-+
-+#include <linux/bits.h>
-+#include <linux/device.h>
-+#include <linux/dmapool.h>
-+#include <linux/pci.h>
-+#include <linux/skbuff.h>
-+#include <linux/spinlock.h>
-+#include <linux/wait.h>
-+#include <linux/workqueue.h>
-+#include <linux/types.h>
-+
-+#include "t7xx_cldma.h"
-+#include "t7xx_pci.h"
-+
-+/**
-+ * enum cldma_id - Identifiers for CLDMA HW units.
-+ * @CLDMA_ID_MD: Modem control channel.
-+ * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
-+ * @CLDMA_NUM: Number of CLDMA HW units available.
-+ */
-+enum cldma_id {
-+ CLDMA_ID_MD,
-+ CLDMA_ID_AP,
-+ CLDMA_NUM
-+};
-+
-+struct cldma_gpd {
-+ u8 flags;
-+ u8 not_used1;
-+ __le16 rx_data_allow_len;
-+ __le32 next_gpd_ptr_h;
-+ __le32 next_gpd_ptr_l;
-+ __le32 data_buff_bd_ptr_h;
-+ __le32 data_buff_bd_ptr_l;
-+ __le16 data_buff_len;
-+ __le16 not_used2;
-+};
-+
-+struct cldma_request {
-+ struct cldma_gpd *gpd; /* Virtual address for CPU */
-+ dma_addr_t gpd_addr; /* Physical address for DMA */
-+ struct sk_buff *skb;
-+ dma_addr_t mapped_buff;
-+ struct list_head entry;
-+};
-+
-+struct cldma_ring {
-+ struct list_head gpd_ring; /* Ring of struct cldma_request */
-+ unsigned int length; /* Number of struct cldma_request */
-+ int pkt_size;
-+};
-+
-+struct cldma_queue {
-+ struct cldma_ctrl *md_ctrl;
-+ enum mtk_txrx dir;
-+ unsigned int index;
-+ struct cldma_ring *tr_ring;
-+ struct cldma_request *tr_done;
-+ struct cldma_request *rx_refill;
-+ struct cldma_request *tx_next;
-+ int budget; /* Same as ring buffer size by default */
-+ spinlock_t ring_lock;
-+ wait_queue_head_t req_wq; /* Only for TX */
-+ struct workqueue_struct *worker;
-+ struct work_struct cldma_work;
-+};
-+
-+struct cldma_ctrl {
-+ enum cldma_id hif_id;
-+ struct device *dev;
-+ struct t7xx_pci_dev *t7xx_dev;
-+ struct cldma_queue txq[CLDMA_TXQ_NUM];
-+ struct cldma_queue rxq[CLDMA_RXQ_NUM];
-+ unsigned short txq_active;
-+ unsigned short rxq_active;
-+ unsigned short txq_started;
-+ spinlock_t cldma_lock; /* Protects CLDMA structure */
-+ /* Assumes T/R GPD/BD/SPD have the same size */
-+ struct dma_pool *gpd_dmapool;
-+ struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
-+ struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
-+ struct t7xx_cldma_hw hw_info;
-+ bool is_late_init;
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
-+};
-+
-+#define GPD_FLAGS_HWO BIT(0)
-+#define GPD_FLAGS_IOC BIT(7)
-+#define GPD_DMAPOOL_ALIGN 16
-+
-+#define CLDMA_MTU 3584 /* 3.5kB */
-+
-+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
-+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
-+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
-+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
-+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
-+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
-+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
-+
-+#endif /* __T7XX_HIF_CLDMA_H__ */
-diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
-new file mode 100644
-index 000000000000..7dc6c77a59e3
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
-@@ -0,0 +1,33 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#ifndef __T7XX_REG_H__
-+#define __T7XX_REG_H__
-+
-+enum t7xx_int {
-+ DPMAIF_INT,
-+ CLDMA0_INT,
-+ CLDMA1_INT,
-+ CLDMA2_INT,
-+ MHCCIF_INT,
-+ DPMAIF2_INT,
-+ SAP_RGU_INT,
-+ CLDMA3_INT,
-+};
-+
-+#endif /* __T7XX_REG_H__ */
---
-2.35.1
-
+++ /dev/null
-From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001
-From: Yang Yingliang <yangyingliang@huawei.com>
-Date: Thu, 19 May 2022 11:21:08 +0800
-Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()
-
-From: Yang Yingliang <yangyingliang@huawei.com>
-
-commit 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 upstream.
-
-Sometimes t7xx_cldma_gpd_set_next_ptr() is called under a spin lock,
-so add a 'gfp_mask' parameter to t7xx_cldma_gpd_set_next_ptr() to pass
-the flag.
-
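-A minimal sketch of the resulting convention (simplified, with a
-hypothetical helper and lock; the real changes are in the hunks below):
-
-        /* The allocator no longer hard-codes GFP_KERNEL; the caller,
-         * which knows whether it holds a spinlock, picks the gfp mask.
-         */
-        static struct sk_buff *alloc_buf(size_t size, gfp_t gfp_mask)
-        {
-                return __dev_alloc_skb(size, gfp_mask);
-        }
-
-        skb = alloc_buf(pkt_size, GFP_KERNEL); /* process context, may sleep */
-
-        spin_lock_irqsave(&lock, flags);
-        skb = alloc_buf(pkt_size, GFP_ATOMIC); /* must not sleep under the lock */
-        spin_unlock_irqrestore(&lock, flags);
-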
-Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
-Reported-by: Hulk Robot <hulkci@huawei.com>
-Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
-Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
-Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-@@ -89,9 +89,9 @@ static void t7xx_cldma_gpd_set_next_ptr(
- }
-
- static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-- size_t size)
-+ size_t size, gfp_t gfp_mask)
- {
-- req->skb = __dev_alloc_skb(size, GFP_KERNEL);
-+ req->skb = __dev_alloc_skb(size, gfp_mask);
- if (!req->skb)
- return -ENOMEM;
-
-@@ -173,7 +173,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
- spin_unlock_irqrestore(&queue->ring_lock, flags);
- req = queue->rx_refill;
-
-- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
- if (ret)
- return ret;
-
-@@ -396,7 +396,7 @@ static struct cldma_request *t7xx_alloc_
- if (!req->gpd)
- goto err_free_req;
-
-- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
-+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
- if (val)
- goto err_free_pool;
-
-@@ -793,7 +793,7 @@ static int t7xx_cldma_clear_rxq(struct c
- if (req->skb)
- continue;
-
-- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
- if (ret)
- break;
-
+++ /dev/null
-From dc4771f7058cd7e5d9fa85c421534d86e6116ee4 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 20 May 2021 13:50:44 +0000
-Subject: powerpc/lib/code-patching: Don't use struct 'ppc_inst' for runnable
- code in tests.
-
-From: Christophe Leroy <christophe.leroy@csgroup.eu>
-
-[ Upstream commit e90a21ea801d1776d9a786ad02354fd3fe23ce09 ]
-
-'struct ppc_inst' is meant to represent an instruction internally; it
-is not meant to dereference code in memory.
-
-For testing code patching, use patch_instruction() to properly
-write into memory the code to be tested.
-
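-The shape of the change, repeated throughout the hunks below: the tests
-now patch the instruction into a real u32 buffer and inspect it there,
-instead of dereferencing a local struct ppc_inst:
-
-        u32 tmp[2];
-        struct ppc_inst *iptr = (struct ppc_inst *)tmp;
-
-        /* Before: instr = ppc_inst(0x48000103); check(..., &instr, ...); */
-        patch_instruction(iptr, ppc_inst(0x48000103)); /* write the code */
-        check(instr_is_branch_to_addr(iptr, 0x100));   /* inspect it in memory */
-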
-Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/d8425fb42a4adebc35b7509f121817eeb02fac31.1621516826.git.christophe.leroy@csgroup.eu
-Stable-dep-of: 97f88a3d7231 ("powerpc/kprobes: Fix null pointer reference in arch_prepare_kprobe()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 95 ++++++++++++++++++--------------
- 1 file changed, 53 insertions(+), 42 deletions(-)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index a2e4f864b63d..ba3ccadc1a8c 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -422,9 +422,9 @@ static void __init test_branch_iform(void)
- {
- int err;
- struct ppc_inst instr;
-- unsigned long addr;
--
-- addr = (unsigned long)&instr;
-+ u32 tmp[2];
-+ struct ppc_inst *iptr = (struct ppc_inst *)tmp;
-+ unsigned long addr = (unsigned long)tmp;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_iform(ppc_inst(0x48000000)));
-@@ -445,52 +445,57 @@ static void __init test_branch_iform(void)
- check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
-
- /* Absolute branch to 0x100 */
-- instr = ppc_inst(0x48000103);
-- check(instr_is_branch_to_addr(&instr, 0x100));
-+ patch_instruction(iptr, ppc_inst(0x48000103));
-+ check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute branch to 0x420fc */
-- instr = ppc_inst(0x480420ff);
-- check(instr_is_branch_to_addr(&instr, 0x420fc));
-+ patch_instruction(iptr, ppc_inst(0x480420ff));
-+ check(instr_is_branch_to_addr(iptr, 0x420fc));
- /* Maximum positive relative branch, + 20MB - 4B */
-- instr = ppc_inst(0x49fffffc);
-- check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
-+ patch_instruction(iptr, ppc_inst(0x49fffffc));
-+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
- /* Smallest negative relative branch, - 4B */
-- instr = ppc_inst(0x4bfffffc);
-- check(instr_is_branch_to_addr(&instr, addr - 4));
-+ patch_instruction(iptr, ppc_inst(0x4bfffffc));
-+ check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative branch, - 32 MB */
-- instr = ppc_inst(0x4a000000);
-- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
-+ patch_instruction(iptr, ppc_inst(0x4a000000));
-+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Branch to self, with link */
-- err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
-- check(instr_is_branch_to_addr(&instr, addr));
-+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100, with link */
-- err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
-- check(instr_is_branch_to_addr(&instr, addr - 0x100));
-+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100, no link */
-- err = create_branch(&instr, &instr, addr + 0x100, 0);
-- check(instr_is_branch_to_addr(&instr, addr + 0x100));
-+ err = create_branch(&instr, iptr, addr + 0x100, 0);
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 MB */
-- err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
-- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
-+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Out of range relative negative offset, - 32 MB + 4*/
-- err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
-+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
- check(err);
-
- /* Out of range relative positive offset, + 32 MB */
-- err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
-+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
- check(err);
-
- /* Unaligned target */
-- err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
-+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
- check(err);
-
- /* Check flags are masked correctly */
-- err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
-- check(instr_is_branch_to_addr(&instr, addr));
-+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
- }
-
-@@ -513,9 +518,10 @@ static void __init test_branch_bform(void)
- int err;
- unsigned long addr;
- struct ppc_inst *iptr, instr;
-+ u32 tmp[2];
- unsigned int flags;
-
-- iptr = &instr;
-+ iptr = (struct ppc_inst *)tmp;
- addr = (unsigned long)iptr;
-
- /* The simplest case, branch to self, no flags */
-@@ -528,39 +534,43 @@ static void __init test_branch_bform(void)
- check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
-
- /* Absolute conditional branch to 0x100 */
-- instr = ppc_inst(0x43ff0103);
-- check(instr_is_branch_to_addr(&instr, 0x100));
-+ patch_instruction(iptr, ppc_inst(0x43ff0103));
-+ check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute conditional branch to 0x20fc */
-- instr = ppc_inst(0x43ff20ff);
-- check(instr_is_branch_to_addr(&instr, 0x20fc));
-+ patch_instruction(iptr, ppc_inst(0x43ff20ff));
-+ check(instr_is_branch_to_addr(iptr, 0x20fc));
- /* Maximum positive relative conditional branch, + 32 KB - 4B */
-- instr = ppc_inst(0x43ff7ffc);
-- check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
-+ patch_instruction(iptr, ppc_inst(0x43ff7ffc));
-+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
- /* Smallest negative relative conditional branch, - 4B */
-- instr = ppc_inst(0x43fffffc);
-- check(instr_is_branch_to_addr(&instr, addr - 4));
-+ patch_instruction(iptr, ppc_inst(0x43fffffc));
-+ check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative conditional branch, - 32 KB */
-- instr = ppc_inst(0x43ff8000);
-- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
-+ patch_instruction(iptr, ppc_inst(0x43ff8000));
-+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* All condition code bits set & link */
- flags = 0x3ff000 | BRANCH_SET_LINK;
-
- /* Branch to self */
- err = create_cond_branch(&instr, iptr, addr, flags);
-- check(instr_is_branch_to_addr(&instr, addr));
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100 */
- err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
-- check(instr_is_branch_to_addr(&instr, addr - 0x100));
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100 */
- err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
-- check(instr_is_branch_to_addr(&instr, addr + 0x100));
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 KB */
- err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
-- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* Out of range relative negative offset, - 32 KB + 4*/
- err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
-@@ -576,7 +586,8 @@ static void __init test_branch_bform(void)
-
- /* Check flags are masked correctly */
- err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
-- check(instr_is_branch_to_addr(&instr, addr));
-+ patch_instruction(iptr, instr);
-+ check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
- }
-
---
-2.35.1
-
+++ /dev/null
-From c9869aacea83c0fd66a5d3861130dfbf57d30b73 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 23 Jul 2021 14:39:49 +0300
-Subject: RDMA/rdmavt: Decouple QP and SGE lists allocations
-
-From: Leon Romanovsky <leonro@nvidia.com>
-
-[ Upstream commit 44da3730e046a784d088157175d9418ba60661fc ]
-
-The rdmavt QP has fields that are needed for both the control and data
-paths. Such a mixed declaration forced a very specific allocation flow,
-with kzalloc_node() and the SGE list embedded into struct rvt_qp.
-
-This patch separates QP creation into two parts: regular memory allocation
-for the control path and specific code for the SGE list, while the latter
-is accessed through a dereferenced pointer.
-
-Such a pointer and its context are expected to be in the cache, so any
-performance difference is expected to be negligible.
-
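-The shape of the change, condensed from the hunks below:
-
-        /* Before: one node-local allocation, SGE list tacked onto the QP */
-        qp = kzalloc_node(sizeof(*qp) + sg_list_sz, GFP_KERNEL, rdi->dparms.node);
-
-        /* After: plain allocation for the control path, separate SGE list */
-        qp = kzalloc_node(sizeof(*qp), GFP_KERNEL, rdi->dparms.node);
-        qp->r_sg_list = kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
-        if (!qp->r_sg_list)
-                goto bail_qp; /* error paths now free both allocations */
-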
-Link: https://lore.kernel.org/r/f66c1e20ccefba0db3c69c58ca9c897f062b4d1c.1627040189.git.leonro@nvidia.com
-Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
-Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-Stable-dep-of: a3c278807a45 ("RDMA/siw: Fix QP destroy to wait for all references dropped.")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/infiniband/sw/rdmavt/qp.c | 13 ++++++++-----
- include/rdma/rdmavt_qp.h | 2 +-
- 2 files changed, 9 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
-index 585a9c76e518..5d300f25ef87 100644
---- a/drivers/infiniband/sw/rdmavt/qp.c
-+++ b/drivers/infiniband/sw/rdmavt/qp.c
-@@ -1073,7 +1073,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
- int err;
- struct rvt_swqe *swq = NULL;
- size_t sz;
-- size_t sg_list_sz;
-+ size_t sg_list_sz = 0;
- struct ib_qp *ret = ERR_PTR(-ENOMEM);
- struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
- void *priv = NULL;
-@@ -1120,8 +1120,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
- if (!swq)
- return ERR_PTR(-ENOMEM);
-
-- sz = sizeof(*qp);
-- sg_list_sz = 0;
- if (init_attr->srq) {
- struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
-
-@@ -1131,10 +1129,13 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
-- qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
-- rdi->dparms.node);
-+ qp = kzalloc_node(sizeof(*qp), GFP_KERNEL, rdi->dparms.node);
- if (!qp)
- goto bail_swq;
-+ qp->r_sg_list =
-+ kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
-+ if (!qp->r_sg_list)
-+ goto bail_qp;
- qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
-
- RCU_INIT_POINTER(qp->next, NULL);
-@@ -1322,6 +1323,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
-
- bail_qp:
- kfree(qp->s_ack_queue);
-+ kfree(qp->r_sg_list);
- kfree(qp);
-
- bail_swq:
-@@ -1752,6 +1754,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
- kvfree(qp->r_rq.kwq);
- rdi->driver_f.qp_priv_free(rdi, qp);
- kfree(qp->s_ack_queue);
-+ kfree(qp->r_sg_list);
- rdma_destroy_ah_attr(&qp->remote_ah_attr);
- rdma_destroy_ah_attr(&qp->alt_ah_attr);
- free_ud_wq_attr(qp);
-diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
-index 8275954f5ce6..2e58d5e6ac0e 100644
---- a/include/rdma/rdmavt_qp.h
-+++ b/include/rdma/rdmavt_qp.h
-@@ -444,7 +444,7 @@ struct rvt_qp {
- /*
- * This sge list MUST be last. Do not add anything below here.
- */
-- struct rvt_sge r_sg_list[] /* verified SGEs */
-+ struct rvt_sge *r_sg_list /* verified SGEs */
- ____cacheline_aligned_in_smp;
- };
-
---
-2.35.1
-
drm-bridge-avoid-uninitialized-variable-warning.patch
drm-mipi-dsi-detach-devices-when-removing-the-host.patch
drm-bridge-parade-ps8640-fix-regulator-supply-order.patch
-net-wwan-t7xx-add-control-dma-interface.patch
drm-dp_mst-fix-drm_dp_dpcd_read-return-value-checks.patch
drm-pl111-add-of_node_put-when-breaking-out-of-for_e.patch
platform-chrome-fix-double-free-in-chromeos_laptop_p.patch
arm-dts-kirkwood-lsxl-remove-first-ethernet-port.patch
ia64-export-memory_add_physaddr_to_nid-to-fix-cxl-bu.patch
soc-tegra-fuse-drop-kconfig-dependency-on-tegra20_ap.patch
-arm64-dts-qcom-fix-ipq8074-pcie-phy-nodes.patch
arm-dts-exynos-correct-s5k6a3-reset-polarity-on-mida.patch
arm-drop-cmdline_-dependency-on-atags.patch
arm64-ftrace-fix-module-plts-with-mcount.patch
clk-tegra20-fix-refcount-leak-in-tegra20_clock_init.patch
hsi-omap_ssi-fix-refcount-leak-in-ssi_probe.patch
hsi-omap_ssi_port-fix-dma_map_sg-error-check.patch
-clk-qcom-gcc-sdm660-move-parent-tables-after-plls.patch
-clk-qcom-gcc-sdm660-replace-usage-of-parent_names.patch
-clk-qcom-gcc-sdm660-use-array_size-for-num_parents.patch
-clk-qcom-gcc-sdm660-use-floor-ops-for-sdcc1-clock.patch
media-exynos4-is-fimc-is-add-of_node_put-when-breaki.patch
tty-xilinx_uartps-fix-the-ignore_status.patch
media-meson-vdec-add-missing-clk_disable_unprepare-o.patch
mtd-devices-docg3-check-the-return-value-of-devm_ior.patch
mtd-rawnand-fsl_elbc-fix-none-ecc-mode.patch
rdma-siw-always-consume-all-skbuf-data-in-sk_data_re.patch
-rdma-rdmavt-decouple-qp-and-sge-lists-allocations.patch
ata-fix-ata_id_sense_reporting_enabled-and-ata_id_ha.patch
ata-fix-ata_id_has_devslp.patch
ata-fix-ata_id_has_ncq_autosense.patch
powerpc-pci_dn-add-missing-of_node_put.patch
powerpc-powernv-add-missing-of_node_put-in-opal_expo.patch
x86-hyperv-fix-struct-hv_enlightened_vmcs-definition.patch
-kvm-x86-pending-exceptions-must-not-be-blocked-by-an.patch
powerpc-64s-fix-generic_cpu-build-flags-for-ppc970-g.patch
powerpc-fix-spe-power-isa-properties-for-e500v1-plat.patch
-powerpc-lib-code-patching-don-t-use-struct-ppc_inst-.patch
crypto-sahara-don-t-sleep-when-in-softirq.patch
crypto-hisilicon-zip-fix-mismatch-in-get-set-sgl_sge.patch
kernel-cgroup-mundane-spelling-fixes-throughout-the-.patch
net-ieee802154-return-einval-for-unknown-addr-type.patch
revert-net-ieee802154-reject-zero-sized-raw_sendmsg.patch
net-ieee802154-don-t-warn-zero-sized-raw_sendmsg.patch
-net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
revert-drm-amdgpu-move-nbio-sdma_doorbell_range-into-sdma-code-for-vega.patch
revert-drm-amdgpu-use-dirty-framebuffer-helper.patch
ext4-continue-to-expand-file-system-when-the-target-size-doesn-t-reach.patch
+++ /dev/null
-From ca42e146223b8f5a5ff4538b467c232f58a358cc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 29 Aug 2021 22:48:19 +0200
-Subject: clk: qcom: gcc-sdm660: Use ARRAY_SIZE for num_parents
-
-From: Marijn Suijten <marijn.suijten@somainline.org>
-
-[ Upstream commit 00ff818888fd436b687dbef457ea5a9135c60b15 ]
-
-Where possible, use ARRAY_SIZE to determine the number of parents in
-clk_parent_data instead of hardcoding a number that relies on an array
-defined hundreds of lines above.
-
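-A simplified sketch of the pattern (table entries are illustrative; the
-kernel's ARRAY_SIZE() additionally type-checks its argument):
-
-        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
-        static const struct clk_parent_data gcc_parent_data_xo_gpll0[] = {
-                { .fw_name = "xo" },
-                { .hw = &gpll0.clkr.hw },
-        };
-
-        /* .num_parents = 2,  goes stale silently if the table changes */
-        .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0),
-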
-Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
-Link: https://lore.kernel.org/r/20210829204822.289829-2-marijn.suijten@somainline.org
-Signed-off-by: Stephen Boyd <sboyd@kernel.org>
-Stable-dep-of: 6956c18f4ad9 ("clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 80 +++++++++++++++++------------------
- 1 file changed, 40 insertions(+), 40 deletions(-)
-
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index 4d36f96e9ae2..9b97425008ce 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -284,7 +284,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -309,7 +309,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup1_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -323,7 +323,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -337,7 +337,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup2_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -351,7 +351,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -365,7 +365,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup3_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -379,7 +379,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -393,7 +393,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_qup4_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -426,7 +426,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -440,7 +440,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp1_uart2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -454,7 +454,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -468,7 +468,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup1_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -482,7 +482,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -496,7 +496,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup2_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -510,7 +510,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -524,7 +524,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup3_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -538,7 +538,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -552,7 +552,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_qup4_spi_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -566,7 +566,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -580,7 +580,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "blsp2_uart2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -601,7 +601,7 @@ static struct clk_rcg2 gp1_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp1_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -615,7 +615,7 @@ static struct clk_rcg2 gp2_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp2_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -629,7 +629,7 @@ static struct clk_rcg2 gp3_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gp3_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -649,7 +649,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll0_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -670,7 +670,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_gpll4_clk_src",
- .parent_data = gcc_parent_data_xo_gpll4,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll4),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -689,7 +689,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_rbcpr_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -708,7 +708,7 @@ static struct clk_rcg2 pdm2_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "pdm2_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -730,7 +730,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "qspi_ser_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
-- .num_parents = 6,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -756,7 +756,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -778,7 +778,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc1_ice_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -804,7 +804,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "sdcc2_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
-- .num_parents = 4,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4),
- .ops = &clk_rcg2_floor_ops,
- },
- };
-@@ -827,7 +827,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_axi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -848,7 +848,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_ice_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -862,7 +862,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_phy_aux_clk_src",
- .parent_data = gcc_parent_data_xo_sleep_clk,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -883,7 +883,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "ufs_unipro_core_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -904,7 +904,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_master_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -924,7 +924,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb20_mock_utmi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -949,7 +949,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_master_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -970,7 +970,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb30_mock_utmi_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
-- .num_parents = 3,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
- .ops = &clk_rcg2_ops,
- },
- };
-@@ -990,7 +990,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
- .clkr.hw.init = &(struct clk_init_data){
- .name = "usb3_phy_aux_clk_src",
- .parent_data = gcc_parent_data_xo_sleep_clk,
-- .num_parents = 2,
-+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
- .ops = &clk_rcg2_ops,
- },
- };
---
-2.35.1
-
+++ /dev/null
-From c24001c4744ea9e1f02b9c24e8e0ab85281e3804 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 14 Jul 2022 22:38:22 +0200
-Subject: clk: qcom: gcc-sdm660: Use floor ops for SDCC1 clock
-
-From: Marijn Suijten <marijn.suijten@somainline.org>
-
-[ Upstream commit 6956c18f4ad9200aa945f7ea37d65a05afc49d51 ]
-
-In commit 3f905469c8ce ("clk: qcom: gcc: Use floor ops for SDCC clocks")
-floor ops were applied to SDCC2 only, but flooring is also required on
-the SDCC1 apps clock, which is used by the eMMC card on Sony's Nile
-platform; its absence otherwise results in the typical "Card appears
-overclocked" warnings observed on many other platforms before:
-
- mmc0: Card appears overclocked; req 52000000 Hz, actual 100000000 Hz
- mmc0: Card appears overclocked; req 52000000 Hz, actual 100000000 Hz
- mmc0: Card appears overclocked; req 104000000 Hz, actual 192000000 Hz
-
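-Conceptually (a simplified model, not the driver code), the default RCG2
-ops round a requested rate up to the next supported frequency, while the
-floor ops round it down so the card is never overclocked:
-
-        /* Given an ascending table of supported rates in Hz */
-        static unsigned long find_freq(const unsigned long *tbl, int n,
-                                       unsigned long req)
-        {
-                int i;
-
-                for (i = 0; i < n; i++)
-                        if (tbl[i] >= req)
-                                return tbl[i]; /* round up: 52 MHz -> 100 MHz */
-                return tbl[n - 1];
-        }
-
-        static unsigned long find_freq_floor(const unsigned long *tbl, int n,
-                                             unsigned long req)
-        {
-                int i;
-
-                for (i = n - 1; i >= 0; i--)
-                        if (tbl[i] <= req)
-                                return tbl[i]; /* round down: 52 MHz -> 50 MHz */
-                return tbl[0];
-        }
-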
-Fixes: f2a76a2955c0 ("clk: qcom: Add Global Clock controller (GCC) driver for SDM660")
-Signed-off-by: Marijn Suijten <marijn.suijten@somainline.org>
-Tested-by: Alexey Minnekhanov <alexeymin@postmarketos.org>
-Reviewed-by: Stephen Boyd <sboyd@kernel.org>
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
-Link: https://lore.kernel.org/r/20220714203822.186448-1-marijn.suijten@somainline.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/clk/qcom/gcc-sdm660.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
-index 9b97425008ce..db918c92a522 100644
---- a/drivers/clk/qcom/gcc-sdm660.c
-+++ b/drivers/clk/qcom/gcc-sdm660.c
-@@ -757,7 +757,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
- .name = "sdcc1_apps_clk_src",
- .parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
- .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
-- .ops = &clk_rcg2_ops,
-+ .ops = &clk_rcg2_floor_ops,
- },
- };
-
---
-2.35.1
-
+++ /dev/null
-From a5d560287e22035061d8bd1cbc98cfb20e45e892 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 6 May 2022 11:12:59 -0700
-Subject: net: wwan: t7xx: Add control DMA interface
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Haijun Liu <haijun.liu@mediatek.com>
-
-[ Upstream commit 39d439047f1dc88f98b755d6f3a53a4ef8f0de21 ]
-
-Cross Layer DMA (CLDMA) Hardware interface (HIF) enables the control
-path of Host-Modem data transfers. The CLDMA HIF layer provides a common
-interface to the Port Layer.
-
-CLDMA manages 8 independent RX/TX physical channels with data flow
-control in HW queues. CLDMA uses ring buffers of General Packet
-Descriptors (GPD) for TX/RX. GPDs can represent multiple or single
-data buffers (DB).
-
-CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA
-interrupts, and initializes CLDMA HW registers.
-
-CLDMA TX flow:
-1. Port Layer write
-2. Get DB address
-3. Configure GPD
-4. Triggering processing via HW register write
-
-CLDMA RX flow:
-1. CLDMA HW sends a RX "done" to host
-2. Driver starts thread to safely read GPD
-3. DB is sent to Port layer
-4. Create a new buffer for GPD ring
-
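-In GPD terms, the TX steps above boil down to pointing a descriptor at a
-DMA-mapped buffer and handing ownership to the HW; a condensed sketch of
-t7xx_cldma_gpd_handle_tx_request() from the code below:
-
-        buf = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
-        t7xx_cldma_gpd_set_data_ptr(gpd, buf); /* low/high 32-bit halves */
-        gpd->data_buff_len = cpu_to_le16(skb->len);
-        gpd->flags |= GPD_FLAGS_HWO;           /* HW owns the GPD now */
-        /* then kick the queue via its start/resume register */
-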
-Note: This patch does not enable compilation since it has dependencies
-such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and
-struct t7xx_pci_dev, which are added by the core patch.
-
-Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
-Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
-Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
-Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
-Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
-Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Stable-dep-of: 2ac6cdd581f4 ("drm/dp_mst: fix drm_dp_dpcd_read return value checks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/wwan/t7xx/t7xx_cldma.c | 281 ++++++
- drivers/net/wwan/t7xx/t7xx_cldma.h | 180 ++++
- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1192 ++++++++++++++++++++++++
- drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 126 +++
- drivers/net/wwan/t7xx/t7xx_reg.h | 33 +
- 5 files changed, 1812 insertions(+)
- create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.c
- create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.h
- create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.c
- create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.h
- create mode 100644 drivers/net/wwan/t7xx/t7xx_reg.h
-
-diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
-new file mode 100644
-index 000000000000..9f43f256db1d
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
-@@ -0,0 +1,281 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#include <linux/bits.h>
-+#include <linux/delay.h>
-+#include <linux/io.h>
-+#include <linux/io-64-nonatomic-lo-hi.h>
-+#include <linux/types.h>
-+
-+#include "t7xx_cldma.h"
-+
-+#define ADDR_SIZE 8
-+
-+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 val;
-+
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
-+ val |= IP_BUSY_WAKEUP;
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
-+}
-+
-+/**
-+ * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
-+ * @hw_info: Pointer to struct t7xx_cldma_hw.
-+ *
-+ * Restore HW after resume. Writes uplink configuration for CLDMA HW.
-+ */
-+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 ul_cfg;
-+
-+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
-+
-+ if (hw_info->hw_mode == MODE_BIT_64)
-+ ul_cfg |= UL_CFG_BIT_MODE_64;
-+ else if (hw_info->hw_mode == MODE_BIT_40)
-+ ul_cfg |= UL_CFG_BIT_MODE_40;
-+ else if (hw_info->hw_mode == MODE_BIT_36)
-+ ul_cfg |= UL_CFG_BIT_MODE_36;
-+
-+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ /* Disable TX and RX invalid address check */
-+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
-+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
-+}
-+
-+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
-+{
-+ /* Enable the TX & RX interrupts */
-+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
-+ iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
-+ /* Enable the empty queue interrupt */
-+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
-+ iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
-+}
-+
-+void t7xx_cldma_hw_reset(void __iomem *ao_base)
-+{
-+ u32 val;
-+
-+ val = ioread32(ao_base + REG_INFRA_RST2_SET);
-+ val |= RST2_PMIC_SW_RST_SET;
-+ iowrite32(val, ao_base + REG_INFRA_RST2_SET);
-+ val = ioread32(ao_base + REG_INFRA_RST4_SET);
-+ val |= RST4_CLDMA1_SW_RST_SET;
-+ iowrite32(val, ao_base + REG_INFRA_RST4_SET);
-+ udelay(1);
-+
-+ val = ioread32(ao_base + REG_INFRA_RST4_CLR);
-+ val |= RST4_CLDMA1_SW_RST_CLR;
-+ iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
-+ val = ioread32(ao_base + REG_INFRA_RST2_CLR);
-+ val |= RST2_PMIC_SW_RST_CLR;
-+ iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
-+}
-+
-+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
-+{
-+ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
-+
-+ return ioread64(hw_info->ap_pdn_base + offset);
-+}
-+
-+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
-+ enum mtk_txrx tx_rx)
-+{
-+ u32 offset = qno * ADDR_SIZE;
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
-+ iowrite64(address, reg + offset);
-+}
-+
-+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *base = hw_info->ap_pdn_base;
-+
-+ if (tx_rx == MTK_RX)
-+ iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
-+ else
-+ iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
-+}
-+
-+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 mask, val;
-+
-+ mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
-+ val = ioread32(reg);
-+
-+ return val & mask;
-+}
-+
-+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
-+{
-+ unsigned int ch_id;
-+
-+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ ch_id &= bitmask;
-+ /* Clear the ch IDs in the TX interrupt status register */
-+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+}
-+
-+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
-+{
-+ unsigned int ch_id;
-+
-+ ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ ch_id &= bitmask;
-+ /* Clear the ch IDs in the RX interrupt status register */
-+ iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+}
-+
-+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
-+ val = ioread32(reg);
-+ return val & bitmask;
-+}
-+
-+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val, reg);
-+}
-+
-+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+ u32 val;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
-+ val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
-+ iowrite32(val << EQ_STA_BIT_OFFSET, reg);
-+}
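By the usage above, the *IMSR0 registers mask (disable) interrupts and the *IMCR0 registers unmask (enable) them, with the queue-empty bits sitting EQ_STA_BIT_OFFSET (8) positions above the queue-done bits. A hypothetical worked example for TX queue 3:

	t7xx_cldma_hw_irq_dis_txrx(hw_info, 3, MTK_TX);	/* BIT(3)  -> L2TIMSR0 */
	t7xx_cldma_hw_irq_dis_eq(hw_info, 3, MTK_TX);	/* BIT(11) -> L2TIMSR0 */
	t7xx_cldma_hw_irq_en_txrx(hw_info, 3, MTK_TX);	/* BIT(3)  -> L2TIMCR0 */
	t7xx_cldma_hw_irq_en_eq(hw_info, 3, MTK_TX);	/* BIT(11) -> L2TIMCR0 */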
-+
-+/**
-+ * t7xx_cldma_hw_init() - Initialize CLDMA HW.
-+ * @hw_info: Pointer to struct t7xx_cldma_hw.
-+ *
-+ * Write uplink and downlink configuration to CLDMA HW.
-+ */
-+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
-+{
-+ u32 ul_cfg, dl_cfg;
-+
-+ ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
-+ /* Configure the DRAM address mode */
-+ ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
-+ dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
-+
-+ if (hw_info->hw_mode == MODE_BIT_64) {
-+ ul_cfg |= UL_CFG_BIT_MODE_64;
-+ dl_cfg |= DL_CFG_BIT_MODE_64;
-+ } else if (hw_info->hw_mode == MODE_BIT_40) {
-+ ul_cfg |= UL_CFG_BIT_MODE_40;
-+ dl_cfg |= DL_CFG_BIT_MODE_40;
-+ } else if (hw_info->hw_mode == MODE_BIT_36) {
-+ ul_cfg |= UL_CFG_BIT_MODE_36;
-+ dl_cfg |= DL_CFG_BIT_MODE_36;
-+ }
-+
-+ iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
-+ dl_cfg |= DL_CFG_UP_HW_LAST;
-+ iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
-+ iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
-+ iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
-+ iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
-+ iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
-+}
-+
-+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
-+ hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
-+ iowrite32(CLDMA_ALL_Q, reg);
-+}
-+
-+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
-+{
-+ void __iomem *reg;
-+
-+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
-+ hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
-+ iowrite32(TXRX_STATUS_BITMASK, reg);
-+ iowrite32(EMPTY_STATUS_BITMASK, reg);
-+}
-diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h
-new file mode 100644
-index 000000000000..8949e8377fb0
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_cldma.h
-@@ -0,0 +1,180 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#ifndef __T7XX_CLDMA_H__
-+#define __T7XX_CLDMA_H__
-+
-+#include <linux/bits.h>
-+#include <linux/types.h>
-+
-+#define CLDMA_TXQ_NUM 8
-+#define CLDMA_RXQ_NUM 8
-+#define CLDMA_ALL_Q GENMASK(7, 0)
-+
-+/* Interrupt status bits */
-+#define EMPTY_STATUS_BITMASK GENMASK(15, 8)
-+#define TXRX_STATUS_BITMASK GENMASK(7, 0)
-+#define EQ_STA_BIT_OFFSET 8
-+#define L2_INT_BIT_COUNT 16
-+#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK)
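As a quick check of the layout above: queue-done status occupies bits 7..0 and queue-empty status bits 15..8, so EQ_STA_BIT(0) evaluates to BIT(8) and EQ_STA_BIT(7) to BIT(15), while any index above 7 falls outside EMPTY_STATUS_BITMASK and yields 0.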
-+
-+#define TQ_ERR_INT_BITMASK GENMASK(23, 16)
-+#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
-+
-+#define RQ_ERR_INT_BITMASK GENMASK(23, 16)
-+#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
-+
-+#define CLDMA0_AO_BASE 0x10049000
-+#define CLDMA0_PD_BASE 0x1021d000
-+#define CLDMA1_AO_BASE 0x1004b000
-+#define CLDMA1_PD_BASE 0x1021f000
-+
-+#define CLDMA_R_AO_BASE 0x10023000
-+#define CLDMA_R_PD_BASE 0x1023d000
-+
-+/* CLDMA TX */
-+#define REG_CLDMA_UL_START_ADDRL_0 0x0004
-+#define REG_CLDMA_UL_START_ADDRH_0 0x0008
-+#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044
-+#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048
-+#define REG_CLDMA_UL_STATUS 0x0084
-+#define REG_CLDMA_UL_START_CMD 0x0088
-+#define REG_CLDMA_UL_RESUME_CMD 0x008c
-+#define REG_CLDMA_UL_STOP_CMD 0x0090
-+#define REG_CLDMA_UL_ERROR 0x0094
-+#define REG_CLDMA_UL_CFG 0x0098
-+#define UL_CFG_BIT_MODE_36 BIT(5)
-+#define UL_CFG_BIT_MODE_40 BIT(6)
-+#define UL_CFG_BIT_MODE_64 BIT(7)
-+#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5)
-+
-+#define REG_CLDMA_UL_MEM 0x009c
-+#define UL_MEM_CHECK_DIS BIT(0)
-+
-+/* CLDMA RX */
-+#define REG_CLDMA_DL_START_CMD 0x05bc
-+#define REG_CLDMA_DL_RESUME_CMD 0x05c0
-+#define REG_CLDMA_DL_STOP_CMD 0x05c4
-+#define REG_CLDMA_DL_MEM 0x0508
-+#define DL_MEM_CHECK_DIS BIT(0)
-+
-+#define REG_CLDMA_DL_CFG 0x0404
-+#define DL_CFG_UP_HW_LAST BIT(2)
-+#define DL_CFG_BIT_MODE_36 BIT(10)
-+#define DL_CFG_BIT_MODE_40 BIT(11)
-+#define DL_CFG_BIT_MODE_64 BIT(12)
-+#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10)
-+
-+#define REG_CLDMA_DL_START_ADDRL_0 0x0478
-+#define REG_CLDMA_DL_START_ADDRH_0 0x047c
-+#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8
-+#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc
-+#define REG_CLDMA_DL_STATUS 0x04f8
-+
-+/* CLDMA MISC */
-+#define REG_CLDMA_L2TISAR0 0x0810
-+#define REG_CLDMA_L2TISAR1 0x0814
-+#define REG_CLDMA_L2TIMR0 0x0818
-+#define REG_CLDMA_L2TIMR1 0x081c
-+#define REG_CLDMA_L2TIMCR0 0x0820
-+#define REG_CLDMA_L2TIMCR1 0x0824
-+#define REG_CLDMA_L2TIMSR0 0x0828
-+#define REG_CLDMA_L2TIMSR1 0x082c
-+#define REG_CLDMA_L3TISAR0 0x0830
-+#define REG_CLDMA_L3TISAR1 0x0834
-+#define REG_CLDMA_L2RISAR0 0x0850
-+#define REG_CLDMA_L2RISAR1 0x0854
-+#define REG_CLDMA_L3RISAR0 0x0870
-+#define REG_CLDMA_L3RISAR1 0x0874
-+#define REG_CLDMA_IP_BUSY 0x08b4
-+#define IP_BUSY_WAKEUP BIT(0)
-+#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0)
-+#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0)
-+
-+/* CLDMA MISC */
-+#define REG_CLDMA_L2RIMR0 0x0858
-+#define REG_CLDMA_L2RIMR1 0x085c
-+#define REG_CLDMA_L2RIMCR0 0x0860
-+#define REG_CLDMA_L2RIMCR1 0x0864
-+#define REG_CLDMA_L2RIMSR0 0x0868
-+#define REG_CLDMA_L2RIMSR1 0x086c
-+#define REG_CLDMA_BUSY_MASK 0x0954
-+#define BUSY_MASK_PCIE BIT(0)
-+#define BUSY_MASK_AP BIT(1)
-+#define BUSY_MASK_MD BIT(2)
-+
-+#define REG_CLDMA_INT_MASK 0x0960
-+
-+/* CLDMA RESET */
-+#define REG_INFRA_RST4_SET 0x0730
-+#define RST4_CLDMA1_SW_RST_SET BIT(20)
-+
-+#define REG_INFRA_RST4_CLR 0x0734
-+#define RST4_CLDMA1_SW_RST_CLR BIT(20)
-+
-+#define REG_INFRA_RST2_SET 0x0140
-+#define RST2_PMIC_SW_RST_SET BIT(18)
-+
-+#define REG_INFRA_RST2_CLR 0x0144
-+#define RST2_PMIC_SW_RST_CLR BIT(18)
-+
-+enum mtk_txrx {
-+ MTK_TX,
-+ MTK_RX,
-+};
-+
-+enum t7xx_hw_mode {
-+ MODE_BIT_32,
-+ MODE_BIT_36,
-+ MODE_BIT_40,
-+ MODE_BIT_64,
-+};
-+
-+struct t7xx_cldma_hw {
-+ enum t7xx_hw_mode hw_mode;
-+ void __iomem *ap_ao_base;
-+ void __iomem *ap_pdn_base;
-+ u32 phy_interrupt_id;
-+};
-+
-+void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
-+unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
-+void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
-+void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info,
-+ unsigned int qno, u64 address, enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_reset(void __iomem *ao_base);
-+void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
-+unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
-+ enum mtk_txrx tx_rx);
-+void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info);
-+void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info);
-+bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
-+#endif
-diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-new file mode 100644
-index 000000000000..c756b1d0b519
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-@@ -0,0 +1,1192 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ *
-+ * Contributors:
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ */
-+
-+#include <linux/bits.h>
-+#include <linux/bitops.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/dmapool.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma-direction.h>
-+#include <linux/gfp.h>
-+#include <linux/io.h>
-+#include <linux/io-64-nonatomic-lo-hi.h>
-+#include <linux/iopoll.h>
-+#include <linux/irqreturn.h>
-+#include <linux/kernel.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <linux/netdevice.h>
-+#include <linux/pci.h>
-+#include <linux/sched.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+#include <linux/types.h>
-+#include <linux/wait.h>
-+#include <linux/workqueue.h>
-+
-+#include "t7xx_cldma.h"
-+#include "t7xx_hif_cldma.h"
-+#include "t7xx_mhccif.h"
-+#include "t7xx_pci.h"
-+#include "t7xx_pcie_mac.h"
-+#include "t7xx_reg.h"
-+#include "t7xx_state_monitor.h"
-+
-+#define MAX_TX_BUDGET 16
-+#define MAX_RX_BUDGET 16
-+
-+#define CHECK_Q_STOP_TIMEOUT_US 1000000
-+#define CHECK_Q_STOP_STEP_US 10000
-+
-+#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */
-+
-+static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
-+ enum mtk_txrx tx_rx, unsigned int index)
-+{
-+ queue->dir = tx_rx;
-+ queue->index = index;
-+ queue->md_ctrl = md_ctrl;
-+ queue->tr_ring = NULL;
-+ queue->tr_done = NULL;
-+ queue->tx_next = NULL;
-+}
-+
-+static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
-+ enum mtk_txrx tx_rx, unsigned int index)
-+{
-+ md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
-+ init_waitqueue_head(&queue->req_wq);
-+ spin_lock_init(&queue->ring_lock);
-+}
-+
-+static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
-+{
-+ gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
-+ gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
-+}
-+
-+static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
-+{
-+ gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
-+ gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
-+}
-+
-+static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-+ size_t size)
-+{
-+ req->skb = __dev_alloc_skb(size, GFP_KERNEL);
-+ if (!req->skb)
-+ return -ENOMEM;
-+
-+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
-+ skb_data_area_size(req->skb), DMA_FROM_DEVICE);
-+ if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
-+ dev_kfree_skb_any(req->skb);
-+ req->skb = NULL;
-+ req->mapped_buff = 0;
-+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ unsigned int hwo_polling_count = 0;
-+ struct t7xx_cldma_hw *hw_info;
-+ bool rx_not_done = true;
-+ unsigned long flags;
-+ int count = 0;
-+
-+ hw_info = &md_ctrl->hw_info;
-+
-+ do {
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ struct sk_buff *skb;
-+ int ret;
-+
-+ req = queue->tr_done;
-+ if (!req)
-+ return -ENODATA;
-+
-+ gpd = req->gpd;
-+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
-+ dma_addr_t gpd_addr;
-+
-+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
-+ dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
-+ return -ENODEV;
-+ }
-+
-+ gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
-+ queue->index * sizeof(u64));
-+ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
-+ return 0;
-+
-+ udelay(1);
-+ continue;
-+ }
-+
-+ hwo_polling_count = 0;
-+ skb = req->skb;
-+
-+ if (req->mapped_buff) {
-+ dma_unmap_single(md_ctrl->dev, req->mapped_buff,
-+ skb_data_area_size(skb), DMA_FROM_DEVICE);
-+ req->mapped_buff = 0;
-+ }
-+
-+ skb->len = 0;
-+ skb_reset_tail_pointer(skb);
-+ skb_put(skb, le16_to_cpu(gpd->data_buff_len));
-+
-+ ret = md_ctrl->recv_skb(queue, skb);
-+		/* Stop processing here; the RX work item will retry later */
-+ if (ret < 0)
-+ return ret;
-+
-+ req->skb = NULL;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ req = queue->rx_refill;
-+
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
-+ if (ret)
-+ return ret;
-+
-+ gpd = req->gpd;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
-+ gpd->data_buff_len = 0;
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ rx_not_done = ++count < budget || !need_resched();
-+ } while (rx_not_done);
-+
-+ *over_budget = true;
-+ return 0;
-+}
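A note on the HWO polling above: if the hardware still owns the descriptor (HWO set) and the current DL address register still points at this GPD, there is nothing more to collect and the function returns; if the address has already moved past, the flag write-back is presumably lagging, so the loop waits 1 µs and rereads, giving up after 100 polls (roughly 100 µs).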
-+
-+static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct t7xx_cldma_hw *hw_info;
-+ unsigned int pending_rx_int;
-+ bool over_budget = false;
-+ unsigned long flags;
-+ int ret;
-+
-+ hw_info = &md_ctrl->hw_info;
-+
-+ do {
-+ ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
-+ if (ret == -ENODATA)
-+ return 0;
-+ else if (ret)
-+ return ret;
-+
-+ pending_rx_int = 0;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->rxq_active & BIT(queue->index)) {
-+ if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
-+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
-+
-+ pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
-+ MTK_RX);
-+ if (pending_rx_int) {
-+ t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
-+
-+ if (over_budget) {
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ return -EAGAIN;
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ } while (pending_rx_int);
-+
-+ return 0;
-+}
-+
-+static void t7xx_cldma_rx_done(struct work_struct *work)
-+{
-+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ int value;
-+
-+ value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
-+ if (value && md_ctrl->rxq_active & BIT(queue->index)) {
-+ queue_work(queue->worker, &queue->cldma_work);
-+ return;
-+ }
-+
-+ t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
-+ t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
-+ t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
-+}
-+
-+static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ unsigned int dma_len, count = 0;
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+ dma_addr_t dma_free;
-+ struct sk_buff *skb;
-+
-+ while (!kthread_should_stop()) {
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ req = queue->tr_done;
-+ if (!req) {
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ break;
-+ }
-+ gpd = req->gpd;
-+ if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+ break;
-+ }
-+ queue->budget++;
-+ dma_free = req->mapped_buff;
-+ dma_len = le16_to_cpu(gpd->data_buff_len);
-+ skb = req->skb;
-+ req->skb = NULL;
-+ queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ count++;
-+ dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
-+ dev_kfree_skb_any(skb);
-+ }
-+
-+ if (count)
-+ wake_up_nr(&queue->req_wq, count);
-+
-+ return count;
-+}
-+
-+static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct cldma_request *req;
-+ dma_addr_t ul_curr_addr;
-+ unsigned long flags;
-+ bool pending_gpd;
-+
-+ if (!(md_ctrl->txq_active & BIT(queue->index)))
-+ return;
-+
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (pending_gpd) {
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+
-+		/* Check the TGPD being processed; the 64-bit address is in a per-queue table */
-+ ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
-+ queue->index * sizeof(u64));
-+ if (req->gpd_addr != ul_curr_addr) {
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
-+ md_ctrl->hif_id, queue->index);
-+ return;
-+ }
-+
-+ t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_tx_done(struct work_struct *work)
-+{
-+ struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct t7xx_cldma_hw *hw_info;
-+ unsigned int l2_tx_int;
-+ unsigned long flags;
-+
-+ hw_info = &md_ctrl->hw_info;
-+ t7xx_cldma_gpd_tx_collect(queue);
-+ l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
-+ MTK_TX);
-+ if (l2_tx_int & EQ_STA_BIT(queue->index)) {
-+ t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
-+ t7xx_cldma_txq_empty_hndl(queue);
-+ }
-+
-+ if (l2_tx_int & BIT(queue->index)) {
-+ t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
-+ queue_work(queue->worker, &queue->cldma_work);
-+ return;
-+ }
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->txq_active & BIT(queue->index)) {
-+ t7xx_cldma_clear_ip_busy(hw_info);
-+ t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
-+ t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
-+ struct cldma_ring *ring, enum dma_data_direction tx_rx)
-+{
-+ struct cldma_request *req_cur, *req_next;
-+
-+ list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
-+ if (req_cur->mapped_buff && req_cur->skb) {
-+ dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
-+ skb_data_area_size(req_cur->skb), tx_rx);
-+ req_cur->mapped_buff = 0;
-+ }
-+
-+ dev_kfree_skb_any(req_cur->skb);
-+
-+ if (req_cur->gpd)
-+ dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
-+
-+ list_del(&req_cur->entry);
-+ kfree(req_cur);
-+ }
-+}
-+
-+static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
-+{
-+ struct cldma_request *req;
-+ int val;
-+
-+ req = kzalloc(sizeof(*req), GFP_KERNEL);
-+ if (!req)
-+ return NULL;
-+
-+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
-+ if (!req->gpd)
-+ goto err_free_req;
-+
-+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
-+ if (val)
-+ goto err_free_pool;
-+
-+ return req;
-+
-+err_free_pool:
-+ dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
-+
-+err_free_req:
-+ kfree(req);
-+
-+ return NULL;
-+}
-+
-+static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
-+{
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ int i;
-+
-+ INIT_LIST_HEAD(&ring->gpd_ring);
-+ ring->length = MAX_RX_BUDGET;
-+
-+ for (i = 0; i < ring->length; i++) {
-+ req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
-+ if (!req) {
-+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ gpd = req->gpd;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
-+ gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+ INIT_LIST_HEAD(&req->entry);
-+ list_add_tail(&req->entry, &ring->gpd_ring);
-+ }
-+
-+ /* Link previous GPD to next GPD, circular */
-+ list_for_each_entry(req, &ring->gpd_ring, entry) {
-+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
-+ gpd = req->gpd;
-+ }
-+
-+ return 0;
-+}
-+
-+static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
-+{
-+ struct cldma_request *req;
-+
-+ req = kzalloc(sizeof(*req), GFP_KERNEL);
-+ if (!req)
-+ return NULL;
-+
-+ req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
-+ if (!req->gpd) {
-+ kfree(req);
-+ return NULL;
-+ }
-+
-+ return req;
-+}
-+
-+static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
-+{
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ int i;
-+
-+ INIT_LIST_HEAD(&ring->gpd_ring);
-+ ring->length = MAX_TX_BUDGET;
-+
-+ for (i = 0; i < ring->length; i++) {
-+ req = t7xx_alloc_tx_request(md_ctrl);
-+ if (!req) {
-+ t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
-+ return -ENOMEM;
-+ }
-+
-+ gpd = req->gpd;
-+ gpd->flags = GPD_FLAGS_IOC;
-+ INIT_LIST_HEAD(&req->entry);
-+ list_add_tail(&req->entry, &ring->gpd_ring);
-+ }
-+
-+ /* Link previous GPD to next GPD, circular */
-+ list_for_each_entry(req, &ring->gpd_ring, entry) {
-+ t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
-+ gpd = req->gpd;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
-+ * @queue: Pointer to the queue structure.
-+ *
-+ * Called with ring_lock held (unless called during the initialization phase).
-+ */
-+static void t7xx_cldma_q_reset(struct cldma_queue *queue)
-+{
-+ struct cldma_request *req;
-+
-+ req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
-+ queue->tr_done = req;
-+ queue->budget = queue->tr_ring->length;
-+
-+ if (queue->dir == MTK_TX)
-+ queue->tx_next = req;
-+ else
-+ queue->rx_refill = req;
-+}
-+
-+static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+
-+ queue->dir = MTK_RX;
-+ queue->tr_ring = &md_ctrl->rx_ring[queue->index];
-+ t7xx_cldma_q_reset(queue);
-+}
-+
-+static void t7xx_cldma_txq_init(struct cldma_queue *queue)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+
-+ queue->dir = MTK_TX;
-+ queue->tr_ring = &md_ctrl->tx_ring[queue->index];
-+ t7xx_cldma_q_reset(queue);
-+}
-+
-+static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
-+}
-+
-+static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
-+}
-+
-+static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ /* L2 raw interrupt status */
-+ l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
-+ l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
-+ l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
-+ l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
-+ l2_tx_int &= ~l2_tx_int_msk;
-+ l2_rx_int &= ~l2_rx_int_msk;
-+
-+ if (l2_tx_int) {
-+ if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
-+ /* Read and clear L3 TX interrupt status */
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
-+ }
-+
-+ t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
-+ if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
-+ for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
-+ if (i < CLDMA_TXQ_NUM) {
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
-+ queue_work(md_ctrl->txq[i].worker,
-+ &md_ctrl->txq[i].cldma_work);
-+ } else {
-+ t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
-+ }
-+ }
-+ }
-+ }
-+
-+ if (l2_rx_int) {
-+ if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
-+ /* Read and clear L3 RX interrupt status */
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
-+ val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
-+ iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
-+ }
-+
-+ t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
-+ if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
-+ l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
-+ for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
-+ queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
-+ }
-+ }
-+ }
-+}
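One subtlety in the RX branch above: l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM folds the queue-empty bits (15..8) down onto the queue-done bits (7..0), so the single for_each_set_bit() pass over the low eight bits queues the per-queue work for either interrupt type; an empty interrupt on RX queue 1, for instance, arrives as bit 9 and is OR-ed into bit 1 before the loop runs.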
-+
-+static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned int tx_active;
-+ unsigned int rx_active;
-+
-+ if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
-+ return false;
-+
-+ tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
-+ rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
-+
-+ return tx_active || rx_active;
-+}
-+
-+/**
-+ * t7xx_cldma_stop() - Stop CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Stop TX and RX queues. Disable L1 and L2 interrupts.
-+ * Clear status registers.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ERROR - Error code from polling t7xx_cldma_qs_are_active().
-+ */
-+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ bool active;
-+ int i, ret;
-+
-+ md_ctrl->rxq_active = 0;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
-+ md_ctrl->txq_active = 0;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
-+ md_ctrl->txq_started = 0;
-+ t7xx_cldma_disable_irq(md_ctrl);
-+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
-+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
-+ t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
-+ t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
-+
-+ if (md_ctrl->is_late_init) {
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ flush_work(&md_ctrl->txq[i].cldma_work);
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ flush_work(&md_ctrl->rxq[i].cldma_work);
-+ }
-+
-+ ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
-+ CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
-+ if (ret)
-+ dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
-+
-+ return ret;
-+}
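For reference on the polling above: read_poll_timeout() re-evaluates t7xx_cldma_qs_are_active() every CHECK_Q_STOP_STEP_US (10 ms) until all queues report inactive or CHECK_Q_STOP_TIMEOUT_US (1 s) elapses, and the trailing 'true' makes it sleep once before the first read as well.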
-+
-+static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
-+{
-+ int i;
-+
-+ if (!md_ctrl->is_late_init)
-+ return;
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
-+
-+ dma_pool_destroy(md_ctrl->gpd_dmapool);
-+ md_ctrl->gpd_dmapool = NULL;
-+ md_ctrl->is_late_init = false;
-+}
-+
-+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long flags;
-+ int i;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_ctrl->txq_active = 0;
-+ md_ctrl->rxq_active = 0;
-+ t7xx_cldma_disable_irq(md_ctrl);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ cancel_work_sync(&md_ctrl->txq[i].cldma_work);
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ t7xx_cldma_late_release(md_ctrl);
-+}
-+
-+/**
-+ * t7xx_cldma_start() - Start CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Set TX/RX start address.
-+ * Start all RX queues and enable L2 interrupt.
-+ */
-+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->is_late_init) {
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ t7xx_cldma_enable_irq(md_ctrl);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ if (md_ctrl->txq[i].tr_done)
-+ t7xx_cldma_hw_set_start_addr(hw_info, i,
-+ md_ctrl->txq[i].tr_done->gpd_addr,
-+ MTK_TX);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ if (md_ctrl->rxq[i].tr_done)
-+ t7xx_cldma_hw_set_start_addr(hw_info, i,
-+ md_ctrl->rxq[i].tr_done->gpd_addr,
-+ MTK_RX);
-+ }
-+
-+ /* Enable L2 interrupt */
-+ t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
-+ t7xx_cldma_hw_start(hw_info);
-+ md_ctrl->txq_started = 0;
-+ md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
-+ md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
-+{
-+ struct cldma_queue *txq = &md_ctrl->txq[qnum];
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&txq->ring_lock, flags);
-+ t7xx_cldma_q_reset(txq);
-+ list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
-+ gpd = req->gpd;
-+ gpd->flags &= ~GPD_FLAGS_HWO;
-+ t7xx_cldma_gpd_set_data_ptr(gpd, 0);
-+ gpd->data_buff_len = 0;
-+ dev_kfree_skb_any(req->skb);
-+ req->skb = NULL;
-+ }
-+ spin_unlock_irqrestore(&txq->ring_lock, flags);
-+}
-+
-+static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
-+{
-+ struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
-+ struct cldma_request *req;
-+ struct cldma_gpd *gpd;
-+ unsigned long flags;
-+ int ret = 0;
-+
-+ spin_lock_irqsave(&rxq->ring_lock, flags);
-+ t7xx_cldma_q_reset(rxq);
-+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
-+ gpd = req->gpd;
-+ gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
-+ gpd->data_buff_len = 0;
-+
-+ if (req->skb) {
-+ req->skb->len = 0;
-+ skb_reset_tail_pointer(req->skb);
-+ }
-+ }
-+
-+ list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
-+ if (req->skb)
-+ continue;
-+
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
-+ if (ret)
-+ break;
-+
-+ t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
-+ }
-+ spin_unlock_irqrestore(&rxq->ring_lock, flags);
-+
-+ return ret;
-+}
-+
-+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
-+{
-+ int i;
-+
-+ if (tx_rx == MTK_TX) {
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_clear_txq(md_ctrl, i);
-+ } else {
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++)
-+ t7xx_cldma_clear_rxq(md_ctrl, i);
-+ }
-+}
-+
-+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
-+ t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
-+ if (tx_rx == MTK_RX)
-+ md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
-+ else
-+ md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
-+ t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
-+ struct sk_buff *skb)
-+{
-+ struct cldma_ctrl *md_ctrl = queue->md_ctrl;
-+ struct cldma_gpd *gpd = tx_req->gpd;
-+ unsigned long flags;
-+
-+ /* Update GPD */
-+ tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
-+
-+ if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
-+ dev_err(md_ctrl->dev, "DMA mapping failed\n");
-+ return -ENOMEM;
-+ }
-+
-+ t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
-+ gpd->data_buff_len = cpu_to_le16(skb->len);
-+
-+	/* This lock must cover the TGPD setting because, even without a resume
-+	 * operation, CLDMA can start sending the next HWO=1 TGPD as soon as the
-+	 * previous one finishes.
-+	 */
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (md_ctrl->txq_active & BIT(queue->index))
-+ gpd->flags |= GPD_FLAGS_HWO;
-+
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ tx_req->skb = skb;
-+ return 0;
-+}
-+
-+/* Called with cldma_lock */
-+static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
-+ struct cldma_request *prev_req)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+
-+ /* Check whether the device was powered off (CLDMA start address is not set) */
-+ if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
-+ t7xx_cldma_hw_init(hw_info);
-+ t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
-+ md_ctrl->txq_started &= ~BIT(qno);
-+ }
-+
-+ if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
-+ if (md_ctrl->txq_started & BIT(qno))
-+ t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
-+ else
-+ t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
-+
-+ md_ctrl->txq_started |= BIT(qno);
-+ }
-+}
-+
-+/**
-+ * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
-+ * @md_ctrl: CLDMA context structure.
-+ * @recv_skb: Receiving skb callback.
-+ */
-+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
-+{
-+ md_ctrl->recv_skb = recv_skb;
-+}
-+
-+/**
-+ * t7xx_cldma_send_skb() - Send control data to modem.
-+ * @md_ctrl: CLDMA context structure.
-+ * @qno: Queue number.
-+ * @skb: Socket buffer.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ENOMEM - Allocation failure.
-+ * * -EINVAL - Invalid queue request.
-+ * * -EIO - Queue is not active.
-+ * * -ETIMEDOUT - Timeout waiting for the device to wake up.
-+ */
-+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
-+{
-+ struct cldma_request *tx_req;
-+ struct cldma_queue *queue;
-+ unsigned long flags;
-+ int ret;
-+
-+ if (qno >= CLDMA_TXQ_NUM)
-+ return -EINVAL;
-+
-+ queue = &md_ctrl->txq[qno];
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ if (!(md_ctrl->txq_active & BIT(qno))) {
-+ ret = -EIO;
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ goto allow_sleep;
-+ }
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ do {
-+ spin_lock_irqsave(&queue->ring_lock, flags);
-+ tx_req = queue->tx_next;
-+ if (queue->budget > 0 && !tx_req->skb) {
-+ struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
-+
-+ queue->budget--;
-+ t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
-+ queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+			/* Protect access to the modem for queue operations (resume/start),
-+			 * which touch registers shared by all queues.
-+			 * cldma_lock is independent of ring_lock, which is per queue.
-+			 */
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+
-+ break;
-+ }
-+ spin_unlock_irqrestore(&queue->ring_lock, flags);
-+
-+ if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+ }
-+
-+ ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
-+ } while (!ret);
-+
-+allow_sleep:
-+ return ret;
-+}
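A hypothetical caller of the TX path above, with skb construction and retry policy elided:

	struct sk_buff *skb = __dev_alloc_skb(len, GFP_KERNEL);

	/* ... fill in the control message payload ... */
	ret = t7xx_cldma_send_skb(md_ctrl, 0, skb);	/* control queue 0 */
	if (ret)
		dev_err(md_ctrl->dev, "control TX failed: %d\n", ret);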
-+
-+static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
-+{
-+ char dma_pool_name[32];
-+ int i, j, ret;
-+
-+ if (md_ctrl->is_late_init) {
-+ dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
-+ return -EALREADY;
-+ }
-+
-+ snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
-+
-+ md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
-+ sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
-+ if (!md_ctrl->gpd_dmapool) {
-+ dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
-+ if (ret) {
-+ dev_err(md_ctrl->dev, "control TX ring init fail\n");
-+ goto err_free_tx_ring;
-+ }
-+ }
-+
-+ for (j = 0; j < CLDMA_RXQ_NUM; j++) {
-+ md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-+
-+ if (j == CLDMA_RXQ_NUM - 1)
-+ md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-+
-+ ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
-+ if (ret) {
-+ dev_err(md_ctrl->dev, "Control RX ring init fail\n");
-+ goto err_free_rx_ring;
-+ }
-+ }
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++)
-+ t7xx_cldma_txq_init(&md_ctrl->txq[i]);
-+
-+ for (j = 0; j < CLDMA_RXQ_NUM; j++)
-+ t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
-+
-+ md_ctrl->is_late_init = true;
-+ return 0;
-+
-+err_free_rx_ring:
-+ while (j--)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
-+
-+err_free_tx_ring:
-+ while (i--)
-+ t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
-+
-+ return ret;
-+}
-+
-+static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
-+{
-+ return addr + phy_addr - addr_trs1;
-+}
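The translation above is plain pointer arithmetic: a register's virtual address is the ioremapped window base plus the register's offset from the window's physical start. With the constants used below, for example, CLDMA1's always-on block ends up at pcie_ext_reg_base + (0x1004b000 - pcie_dev_reg_trsl_addr).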
-+
-+static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ u32 phy_ao_base, phy_pd_base;
-+
-+ if (md_ctrl->hif_id != CLDMA_ID_MD)
-+ return;
-+
-+ phy_ao_base = CLDMA1_AO_BASE;
-+ phy_pd_base = CLDMA1_PD_BASE;
-+ hw_info->phy_interrupt_id = CLDMA1_INT;
-+ hw_info->hw_mode = MODE_BIT_64;
-+ hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
-+ pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
-+ hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
-+ pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
-+}
-+
-+static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
-+{
-+ dev_kfree_skb_any(skb);
-+ return 0;
-+}
-+
-+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
-+{
-+ struct device *dev = &t7xx_dev->pdev->dev;
-+ struct cldma_ctrl *md_ctrl;
-+
-+ md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
-+ if (!md_ctrl)
-+ return -ENOMEM;
-+
-+ md_ctrl->t7xx_dev = t7xx_dev;
-+ md_ctrl->dev = dev;
-+ md_ctrl->hif_id = hif_id;
-+ md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
-+ t7xx_hw_info_init(md_ctrl);
-+ t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
-+ return 0;
-+}
-+
-+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
-+ t7xx_cldma_hw_stop(hw_info, MTK_TX);
-+ t7xx_cldma_hw_stop(hw_info, MTK_RX);
-+ t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
-+ t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
-+ t7xx_cldma_hw_init(hw_info);
-+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
-+}
-+
-+static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
-+{
-+ struct cldma_ctrl *md_ctrl = data;
-+ u32 interrupt;
-+
-+ interrupt = md_ctrl->hw_info.phy_interrupt_id;
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
-+ t7xx_cldma_irq_work_cb(md_ctrl);
-+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
-+ t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
-+ return IRQ_HANDLED;
-+}
-+
-+static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
-+{
-+ int i;
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ if (md_ctrl->txq[i].worker) {
-+ destroy_workqueue(md_ctrl->txq[i].worker);
-+ md_ctrl->txq[i].worker = NULL;
-+ }
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ if (md_ctrl->rxq[i].worker) {
-+ destroy_workqueue(md_ctrl->rxq[i].worker);
-+ md_ctrl->rxq[i].worker = NULL;
-+ }
-+ }
-+}
-+
-+/**
-+ * t7xx_cldma_init() - Initialize CLDMA.
-+ * @md_ctrl: CLDMA context structure.
-+ *
-+ * Initialize the HIF TX/RX queue structures.
-+ * Register the CLDMA ISR callback with the PCIe driver.
-+ *
-+ * Return:
-+ * * 0 - Success.
-+ * * -ERROR - Error code from a failed sub-initialization.
-+ */
-+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
-+{
-+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
-+ int i;
-+
-+ md_ctrl->txq_active = 0;
-+ md_ctrl->rxq_active = 0;
-+ md_ctrl->is_late_init = false;
-+
-+ spin_lock_init(&md_ctrl->cldma_lock);
-+
-+ for (i = 0; i < CLDMA_TXQ_NUM; i++) {
-+ md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
-+ md_ctrl->txq[i].worker =
-+ alloc_workqueue("md_hif%d_tx%d_worker",
-+ WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
-+ 1, md_ctrl->hif_id, i);
-+ if (!md_ctrl->txq[i].worker)
-+ goto err_workqueue;
-+
-+ INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
-+ }
-+
-+ for (i = 0; i < CLDMA_RXQ_NUM; i++) {
-+ md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
-+ INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
-+
-+ md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker",
-+ WQ_UNBOUND | WQ_MEM_RECLAIM,
-+ 1, md_ctrl->hif_id, i);
-+ if (!md_ctrl->rxq[i].worker)
-+ goto err_workqueue;
-+ }
-+
-+ t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
-+ md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
-+ md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
-+ md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
-+ t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
-+ return 0;
-+
-+err_workqueue:
-+ t7xx_cldma_destroy_wqs(md_ctrl);
-+ return -ENOMEM;
-+}
-+
-+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_cldma_late_release(md_ctrl);
-+ t7xx_cldma_late_init(md_ctrl);
-+}
-+
-+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
-+{
-+ t7xx_cldma_stop(md_ctrl);
-+ t7xx_cldma_late_release(md_ctrl);
-+ t7xx_cldma_destroy_wqs(md_ctrl);
-+}
-diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
-new file mode 100644
-index 000000000000..deb239e4f803
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
-@@ -0,0 +1,126 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ */
-+
-+#ifndef __T7XX_HIF_CLDMA_H__
-+#define __T7XX_HIF_CLDMA_H__
-+
-+#include <linux/bits.h>
-+#include <linux/device.h>
-+#include <linux/dmapool.h>
-+#include <linux/pci.h>
-+#include <linux/skbuff.h>
-+#include <linux/spinlock.h>
-+#include <linux/wait.h>
-+#include <linux/workqueue.h>
-+#include <linux/types.h>
-+
-+#include "t7xx_cldma.h"
-+#include "t7xx_pci.h"
-+
-+/**
-+ * enum cldma_id - Identifiers for CLDMA HW units.
-+ * @CLDMA_ID_MD: Modem control channel.
-+ * @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
-+ * @CLDMA_NUM: Number of CLDMA HW units available.
-+ */
-+enum cldma_id {
-+ CLDMA_ID_MD,
-+ CLDMA_ID_AP,
-+ CLDMA_NUM
-+};
-+
-+struct cldma_gpd {
-+ u8 flags;
-+ u8 not_used1;
-+ __le16 rx_data_allow_len;
-+ __le32 next_gpd_ptr_h;
-+ __le32 next_gpd_ptr_l;
-+ __le32 data_buff_bd_ptr_h;
-+ __le32 data_buff_bd_ptr_l;
-+ __le16 data_buff_len;
-+ __le16 not_used2;
-+};
-+
-+struct cldma_request {
-+ struct cldma_gpd *gpd; /* Virtual address for CPU */
-+ dma_addr_t gpd_addr; /* Physical address for DMA */
-+ struct sk_buff *skb;
-+ dma_addr_t mapped_buff;
-+ struct list_head entry;
-+};
-+
-+struct cldma_ring {
-+ struct list_head gpd_ring; /* Ring of struct cldma_request */
-+ unsigned int length; /* Number of struct cldma_request */
-+ int pkt_size;
-+};
-+
-+struct cldma_queue {
-+ struct cldma_ctrl *md_ctrl;
-+ enum mtk_txrx dir;
-+ unsigned int index;
-+ struct cldma_ring *tr_ring;
-+ struct cldma_request *tr_done;
-+ struct cldma_request *rx_refill;
-+ struct cldma_request *tx_next;
-+ int budget; /* Same as ring buffer size by default */
-+ spinlock_t ring_lock;
-+ wait_queue_head_t req_wq; /* Only for TX */
-+ struct workqueue_struct *worker;
-+ struct work_struct cldma_work;
-+};
-+
-+struct cldma_ctrl {
-+ enum cldma_id hif_id;
-+ struct device *dev;
-+ struct t7xx_pci_dev *t7xx_dev;
-+ struct cldma_queue txq[CLDMA_TXQ_NUM];
-+ struct cldma_queue rxq[CLDMA_RXQ_NUM];
-+ unsigned short txq_active;
-+ unsigned short rxq_active;
-+ unsigned short txq_started;
-+ spinlock_t cldma_lock; /* Protects CLDMA structure */
-+ /* Assumes T/R GPD/BD/SPD have the same size */
-+ struct dma_pool *gpd_dmapool;
-+ struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
-+ struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
-+ struct t7xx_cldma_hw hw_info;
-+ bool is_late_init;
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
-+};
-+
-+#define GPD_FLAGS_HWO BIT(0)
-+#define GPD_FLAGS_IOC BIT(7)
-+#define GPD_DMAPOOL_ALIGN 16
-+
-+#define CLDMA_MTU 3584 /* 3.5kB */
-+
-+int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
-+void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
-+int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
-+int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-+void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
-+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
-+int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
-+void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
-+void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
-+
-+#endif /* __T7XX_HIF_CLDMA_H__ */
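A minimal sketch of the lifecycle implied by the declarations above, assuming a probe-time context where t7xx_dev already exists and with error handling elided; the exact call ordering in the driver proper may differ:

	t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	md_ctrl = t7xx_dev->md->md_ctrl[CLDMA_ID_MD];
	t7xx_cldma_init(md_ctrl);		/* queues, workers, ISR hookup */
	t7xx_cldma_switch_cfg(md_ctrl);		/* late init: GPD pool and rings */
	t7xx_cldma_hif_hw_init(md_ctrl);	/* program the HW */
	t7xx_cldma_start(md_ctrl);

	t7xx_cldma_stop(md_ctrl);		/* tear-down */
	t7xx_cldma_exit(md_ctrl);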
-diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
-new file mode 100644
-index 000000000000..7dc6c77a59e3
---- /dev/null
-+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
-@@ -0,0 +1,33 @@
-+/* SPDX-License-Identifier: GPL-2.0-only
-+ *
-+ * Copyright (c) 2021, MediaTek Inc.
-+ * Copyright (c) 2021-2022, Intel Corporation.
-+ *
-+ * Authors:
-+ * Haijun Liu <haijun.liu@mediatek.com>
-+ * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
-+ *
-+ * Contributors:
-+ * Amir Hanania <amir.hanania@intel.com>
-+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-+ * Eliot Lee <eliot.lee@intel.com>
-+ * Moises Veleta <moises.veleta@intel.com>
-+ * Ricardo Martinez <ricardo.martinez@linux.intel.com>
-+ * Sreehari Kancharla <sreehari.kancharla@intel.com>
-+ */
-+
-+#ifndef __T7XX_REG_H__
-+#define __T7XX_REG_H__
-+
-+enum t7xx_int {
-+ DPMAIF_INT,
-+ CLDMA0_INT,
-+ CLDMA1_INT,
-+ CLDMA2_INT,
-+ MHCCIF_INT,
-+ DPMAIF2_INT,
-+ SAP_RGU_INT,
-+ CLDMA3_INT,
-+};
-+
-+#endif /* __T7XX_REG_H__ */
---
-2.35.1
-
+++ /dev/null
-From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001
-From: Yang Yingliang <yangyingliang@huawei.com>
-Date: Thu, 19 May 2022 11:21:08 +0800
-Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()
-
-From: Yang Yingliang <yangyingliang@huawei.com>
-
-commit 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 upstream.
-
-Sometimes t7xx_cldma_gpd_set_next_ptr() is called under a spin lock,
-so add a 'gfp_mask' parameter to t7xx_cldma_gpd_set_next_ptr() so the
-caller can pass the appropriate allocation flags.
-
-Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
-Reported-by: Hulk Robot <hulkci@huawei.com>
-Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
-Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
-Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
-@@ -89,9 +89,9 @@ static void t7xx_cldma_gpd_set_next_ptr(
- }
-
- static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
-- size_t size)
-+ size_t size, gfp_t gfp_mask)
- {
-- req->skb = __dev_alloc_skb(size, GFP_KERNEL);
-+ req->skb = __dev_alloc_skb(size, gfp_mask);
- if (!req->skb)
- return -ENOMEM;
-
-@@ -173,7 +173,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
- spin_unlock_irqrestore(&queue->ring_lock, flags);
- req = queue->rx_refill;
-
-- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
- if (ret)
- return ret;
-
-@@ -396,7 +396,7 @@ static struct cldma_request *t7xx_alloc_
- if (!req->gpd)
- goto err_free_req;
-
-- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
-+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
- if (val)
- goto err_free_pool;
-
-@@ -793,7 +793,7 @@ static int t7xx_cldma_clear_rxq(struct c
- if (req->skb)
- continue;
-
-- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
-+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
- if (ret)
- break;
-
drm-mipi-dsi-detach-devices-when-removing-the-host.patch
drm-virtio-correct-drm_gem_shmem_get_sg_table-error-.patch
drm-bridge-parade-ps8640-fix-regulator-supply-order.patch
-net-wwan-t7xx-add-control-dma-interface.patch
drm-dp_mst-fix-drm_dp_dpcd_read-return-value-checks.patch
drm-pl111-add-of_node_put-when-breaking-out-of-for_e.patch
asoc-mt6359-fix-tests-for-platform_get_irq-failure.patch
clk-tegra20-fix-refcount-leak-in-tegra20_clock_init.patch
hsi-omap_ssi-fix-refcount-leak-in-ssi_probe.patch
hsi-omap_ssi_port-fix-dma_map_sg-error-check.patch
-clk-qcom-gcc-sdm660-use-array_size-for-num_parents.patch
-clk-qcom-gcc-sdm660-use-floor-ops-for-sdcc1-clock.patch
media-exynos4-is-fimc-is-add-of_node_put-when-breaki.patch
tty-xilinx_uartps-fix-the-ignore_status.patch
media-meson-vdec-add-missing-clk_disable_unprepare-o.patch
revert-net-ieee802154-reject-zero-sized-raw_sendmsg.patch
net-ieee802154-don-t-warn-zero-sized-raw_sendmsg.patch
drm-amd-display-fix-build-breakage-with-config_debug_fs-n.patch
-net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
kconfig.debug-simplify-the-dependency-of-debug_info_dwarf4-5.patch
kconfig.debug-add-toolchain-checks-for-debug_info_dwarf_toolchain_default.patch
lib-kconfig.debug-add-check-for-non-constant-.-s-u-leb128-support-to-dwarf5.patch