--- /dev/null
+From 81748c3141eeafd455178daeba50515b587cf8d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 17:39:51 +0100
+Subject: ASoC: codecs: constify static sdw_slave_ops struct
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 65b7b869da9bd3bd0b9fa60e6fe557bfbc0a75e8 ]
+
+The struct sdw_slave_ops is not modified and sdw_driver takes a pointer
+to const, so make it const for code safety.
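+
+In other words, the ops table can live in read-only memory. A minimal
+sketch of the pattern (the "foo" names below are made up purely for
+illustration and are not part of this patch):
+
+    static int foo_update_status(struct sdw_slave *slave,
+                                 enum sdw_slave_status status)
+    {
+        return 0;
+    }
+
+    /* Never written at runtime, so it can be const (and be placed in
+     * .rodata).
+     */
+    static const struct sdw_slave_ops foo_slave_ops = {
+        .update_status = foo_update_status,
+    };
+
+    static struct sdw_driver foo_sdw_driver = {
+        /* struct sdw_driver::ops is a pointer to const, so a const
+         * table is accepted here without a cast.
+         */
+        .ops = &foo_slave_ops,
+        /* .probe, .id_table, .driver etc. omitted */
+    };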
+
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230124163953.345949-1-krzysztof.kozlowski@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 84822215acd1 ("ASoC: codecs: wcd938x: fix accessing regmap on unattached devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/rt1316-sdw.c | 2 +-
+ sound/soc/codecs/rt1318-sdw.c | 2 +-
+ sound/soc/codecs/rt711-sdca-sdw.c | 2 +-
+ sound/soc/codecs/rt715-sdca-sdw.c | 2 +-
+ sound/soc/codecs/wcd938x-sdw.c | 2 +-
+ sound/soc/codecs/wsa881x.c | 2 +-
+ sound/soc/codecs/wsa883x.c | 2 +-
+ 7 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
+index ed0a114363621..154b6179b6dcd 100644
+--- a/sound/soc/codecs/rt1316-sdw.c
++++ b/sound/soc/codecs/rt1316-sdw.c
+@@ -585,7 +585,7 @@ static int rt1316_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
+ * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
+ * port_prep are not defined for now
+ */
+-static struct sdw_slave_ops rt1316_slave_ops = {
++static const struct sdw_slave_ops rt1316_slave_ops = {
+ .read_prop = rt1316_read_prop,
+ .update_status = rt1316_update_status,
+ };
+diff --git a/sound/soc/codecs/rt1318-sdw.c b/sound/soc/codecs/rt1318-sdw.c
+index f85f5ab2c6d04..c6ec86e97a6e7 100644
+--- a/sound/soc/codecs/rt1318-sdw.c
++++ b/sound/soc/codecs/rt1318-sdw.c
+@@ -697,7 +697,7 @@ static int rt1318_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
+ * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
+ * port_prep are not defined for now
+ */
+-static struct sdw_slave_ops rt1318_slave_ops = {
++static const struct sdw_slave_ops rt1318_slave_ops = {
+ .read_prop = rt1318_read_prop,
+ .update_status = rt1318_update_status,
+ };
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index 88a8392a58edb..e23cec4c457de 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -338,7 +338,7 @@ static int rt711_sdca_interrupt_callback(struct sdw_slave *slave,
+ return ret;
+ }
+
+-static struct sdw_slave_ops rt711_sdca_slave_ops = {
++static const struct sdw_slave_ops rt711_sdca_slave_ops = {
+ .read_prop = rt711_sdca_read_prop,
+ .interrupt_callback = rt711_sdca_interrupt_callback,
+ .update_status = rt711_sdca_update_status,
+diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
+index c54ecf3e69879..38a82e4e2f952 100644
+--- a/sound/soc/codecs/rt715-sdca-sdw.c
++++ b/sound/soc/codecs/rt715-sdca-sdw.c
+@@ -172,7 +172,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
+ return 0;
+ }
+
+-static struct sdw_slave_ops rt715_sdca_slave_ops = {
++static const struct sdw_slave_ops rt715_sdca_slave_ops = {
+ .read_prop = rt715_sdca_read_prop,
+ .update_status = rt715_sdca_update_status,
+ };
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 1bf3c06a2b622..33d1b5ffeaeba 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -191,7 +191,7 @@ static int wcd9380_interrupt_callback(struct sdw_slave *slave,
+ return IRQ_HANDLED;
+ }
+
+-static struct sdw_slave_ops wcd9380_slave_ops = {
++static const struct sdw_slave_ops wcd9380_slave_ops = {
+ .update_status = wcd9380_update_status,
+ .interrupt_callback = wcd9380_interrupt_callback,
+ .bus_config = wcd9380_bus_config,
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 6c8b1db649b89..046843b57b038 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1101,7 +1101,7 @@ static int wsa881x_bus_config(struct sdw_slave *slave,
+ return 0;
+ }
+
+-static struct sdw_slave_ops wsa881x_slave_ops = {
++static const struct sdw_slave_ops wsa881x_slave_ops = {
+ .update_status = wsa881x_update_status,
+ .bus_config = wsa881x_bus_config,
+ .port_prep = wsa881x_port_prep,
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 2533d0973529f..6e9a64c5948e2 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1073,7 +1073,7 @@ static int wsa883x_port_prep(struct sdw_slave *slave,
+ return 0;
+ }
+
+-static struct sdw_slave_ops wsa883x_slave_ops = {
++static const struct sdw_slave_ops wsa883x_slave_ops = {
+ .update_status = wsa883x_update_status,
+ .port_prep = wsa883x_port_prep,
+ };
+--
+2.39.2
+
--- /dev/null
+From 265f4d27b7e7a3ec3ff48f2707f3521a2a816c60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 16:41:02 +0200
+Subject: ASoC: codecs: wcd938x: fix accessing regmap on unattached devices
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit 84822215acd15bd86a7759a835271e63bba83a7b ]
+
+The WCD938x comes with three devices handled by two Linux drivers:
+1. RX Soundwire device (wcd938x-sdw.c driver),
+2. TX Soundwire device, which is used to access the codec via regmap
+   (also the wcd938x-sdw.c driver),
+3. platform device (wcd938x.c driver) - the glue and component master,
+   which actually holds most of the code and uses the TX Soundwire
+   device regmap.
+
+When the RX and TX Soundwire devices probe, the component master
+(platform device) bind tries to write the micbias configuration via the
+TX Soundwire regmap. This might happen before the TX Soundwire device
+enumerates, so the regmap access fails. On a Qualcomm SM8550 board with
+WCD9385:
+
+ qcom-soundwire 6d30000.soundwire-controller: Qualcomm Soundwire controller v2.0.0 Registered
+ wcd938x_codec audio-codec: bound sdw:0:0217:010d:00:4 (ops wcd938x_sdw_component_ops)
+ wcd938x_codec audio-codec: bound sdw:0:0217:010d:00:3 (ops wcd938x_sdw_component_ops)
+ qcom-soundwire 6ad0000.soundwire-controller: swrm_wait_for_wr_fifo_avail err write overflow
+
+Fix the issue by:
+1. Moving the regmap creation from the platform device to the TX
+   Soundwire device. The regmap settings are moved as-is, with one
+   difference: the wcd938x_regmap_config is made const.
+2. Using the regmap in cache-only mode until the actual TX Soundwire
+   device enumerates, and then syncing the regmap cache (see the sketch
+   below).
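+
+In code terms, the change boils down to the following two pieces
+(condensed from the diff below; error paths and unrelated details are
+trimmed):
+
+    /* In wcd9380_probe(), for the TX device only: create the regmap
+     * but keep it in cache-only mode, because the Soundwire peripheral
+     * may not have enumerated yet.
+     */
+    if (wcd->is_tx) {
+        wcd->regmap = devm_regmap_init_sdw(pdev, &wcd938x_regmap_config);
+        if (IS_ERR(wcd->regmap))
+            return dev_err_probe(dev, PTR_ERR(wcd->regmap),
+                                 "Regmap init failed\n");
+
+        regcache_cache_only(wcd->regmap, true);
+    }
+
+    /* In wcd9380_update_status(): once the bus reports the peripheral
+     * as SDW_SLAVE_ATTACHED, leave cache-only mode and flush the
+     * cached register writes to the now-reachable hardware.
+     */
+    if (wcd->regmap && status == SDW_SLAVE_ATTACHED) {
+        regcache_cache_only(wcd->regmap, false);
+        return regcache_sync(wcd->regmap);
+    }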
+
+Cc: <stable@vger.kernel.org> # v5.14+
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Message-Id: <20230503144102.242240-1-krzysztof.kozlowski@linaro.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wcd938x-sdw.c | 1037 +++++++++++++++++++++++++++++++-
+ sound/soc/codecs/wcd938x.c | 1003 +-----------------------------
+ sound/soc/codecs/wcd938x.h | 1 +
+ 3 files changed, 1030 insertions(+), 1011 deletions(-)
+
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 33d1b5ffeaeba..402286dfaea44 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -161,6 +161,14 @@ EXPORT_SYMBOL_GPL(wcd938x_sdw_set_sdw_stream);
+ static int wcd9380_update_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+ {
++ struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
++
++ if (wcd->regmap && (status == SDW_SLAVE_ATTACHED)) {
++ /* Write out any cached changes that happened between probe and attach */
++ regcache_cache_only(wcd->regmap, false);
++ return regcache_sync(wcd->regmap);
++ }
++
+ return 0;
+ }
+
+@@ -177,20 +185,1014 @@ static int wcd9380_interrupt_callback(struct sdw_slave *slave,
+ {
+ struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
+ struct irq_domain *slave_irq = wcd->slave_irq;
+- struct regmap *regmap = dev_get_regmap(&slave->dev, NULL);
+ u32 sts1, sts2, sts3;
+
+ do {
+ handle_nested_irq(irq_find_mapping(slave_irq, 0));
+- regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
+- regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
+- regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
++ regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
++ regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
++ regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
+
+ } while (sts1 || sts2 || sts3);
+
+ return IRQ_HANDLED;
+ }
+
++static const struct reg_default wcd938x_defaults[] = {
++ {WCD938X_ANA_PAGE_REGISTER, 0x00},
++ {WCD938X_ANA_BIAS, 0x00},
++ {WCD938X_ANA_RX_SUPPLIES, 0x00},
++ {WCD938X_ANA_HPH, 0x0C},
++ {WCD938X_ANA_EAR, 0x00},
++ {WCD938X_ANA_EAR_COMPANDER_CTL, 0x02},
++ {WCD938X_ANA_TX_CH1, 0x20},
++ {WCD938X_ANA_TX_CH2, 0x00},
++ {WCD938X_ANA_TX_CH3, 0x20},
++ {WCD938X_ANA_TX_CH4, 0x00},
++ {WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC, 0x00},
++ {WCD938X_ANA_MICB3_DSP_EN_LOGIC, 0x00},
++ {WCD938X_ANA_MBHC_MECH, 0x39},
++ {WCD938X_ANA_MBHC_ELECT, 0x08},
++ {WCD938X_ANA_MBHC_ZDET, 0x00},
++ {WCD938X_ANA_MBHC_RESULT_1, 0x00},
++ {WCD938X_ANA_MBHC_RESULT_2, 0x00},
++ {WCD938X_ANA_MBHC_RESULT_3, 0x00},
++ {WCD938X_ANA_MBHC_BTN0, 0x00},
++ {WCD938X_ANA_MBHC_BTN1, 0x10},
++ {WCD938X_ANA_MBHC_BTN2, 0x20},
++ {WCD938X_ANA_MBHC_BTN3, 0x30},
++ {WCD938X_ANA_MBHC_BTN4, 0x40},
++ {WCD938X_ANA_MBHC_BTN5, 0x50},
++ {WCD938X_ANA_MBHC_BTN6, 0x60},
++ {WCD938X_ANA_MBHC_BTN7, 0x70},
++ {WCD938X_ANA_MICB1, 0x10},
++ {WCD938X_ANA_MICB2, 0x10},
++ {WCD938X_ANA_MICB2_RAMP, 0x00},
++ {WCD938X_ANA_MICB3, 0x10},
++ {WCD938X_ANA_MICB4, 0x10},
++ {WCD938X_BIAS_CTL, 0x2A},
++ {WCD938X_BIAS_VBG_FINE_ADJ, 0x55},
++ {WCD938X_LDOL_VDDCX_ADJUST, 0x01},
++ {WCD938X_LDOL_DISABLE_LDOL, 0x00},
++ {WCD938X_MBHC_CTL_CLK, 0x00},
++ {WCD938X_MBHC_CTL_ANA, 0x00},
++ {WCD938X_MBHC_CTL_SPARE_1, 0x00},
++ {WCD938X_MBHC_CTL_SPARE_2, 0x00},
++ {WCD938X_MBHC_CTL_BCS, 0x00},
++ {WCD938X_MBHC_MOISTURE_DET_FSM_STATUS, 0x00},
++ {WCD938X_MBHC_TEST_CTL, 0x00},
++ {WCD938X_LDOH_MODE, 0x2B},
++ {WCD938X_LDOH_BIAS, 0x68},
++ {WCD938X_LDOH_STB_LOADS, 0x00},
++ {WCD938X_LDOH_SLOWRAMP, 0x50},
++ {WCD938X_MICB1_TEST_CTL_1, 0x1A},
++ {WCD938X_MICB1_TEST_CTL_2, 0x00},
++ {WCD938X_MICB1_TEST_CTL_3, 0xA4},
++ {WCD938X_MICB2_TEST_CTL_1, 0x1A},
++ {WCD938X_MICB2_TEST_CTL_2, 0x00},
++ {WCD938X_MICB2_TEST_CTL_3, 0x24},
++ {WCD938X_MICB3_TEST_CTL_1, 0x1A},
++ {WCD938X_MICB3_TEST_CTL_2, 0x00},
++ {WCD938X_MICB3_TEST_CTL_3, 0xA4},
++ {WCD938X_MICB4_TEST_CTL_1, 0x1A},
++ {WCD938X_MICB4_TEST_CTL_2, 0x00},
++ {WCD938X_MICB4_TEST_CTL_3, 0xA4},
++ {WCD938X_TX_COM_ADC_VCM, 0x39},
++ {WCD938X_TX_COM_BIAS_ATEST, 0xE0},
++ {WCD938X_TX_COM_SPARE1, 0x00},
++ {WCD938X_TX_COM_SPARE2, 0x00},
++ {WCD938X_TX_COM_TXFE_DIV_CTL, 0x22},
++ {WCD938X_TX_COM_TXFE_DIV_START, 0x00},
++ {WCD938X_TX_COM_SPARE3, 0x00},
++ {WCD938X_TX_COM_SPARE4, 0x00},
++ {WCD938X_TX_1_2_TEST_EN, 0xCC},
++ {WCD938X_TX_1_2_ADC_IB, 0xE9},
++ {WCD938X_TX_1_2_ATEST_REFCTL, 0x0A},
++ {WCD938X_TX_1_2_TEST_CTL, 0x38},
++ {WCD938X_TX_1_2_TEST_BLK_EN1, 0xFF},
++ {WCD938X_TX_1_2_TXFE1_CLKDIV, 0x00},
++ {WCD938X_TX_1_2_SAR2_ERR, 0x00},
++ {WCD938X_TX_1_2_SAR1_ERR, 0x00},
++ {WCD938X_TX_3_4_TEST_EN, 0xCC},
++ {WCD938X_TX_3_4_ADC_IB, 0xE9},
++ {WCD938X_TX_3_4_ATEST_REFCTL, 0x0A},
++ {WCD938X_TX_3_4_TEST_CTL, 0x38},
++ {WCD938X_TX_3_4_TEST_BLK_EN3, 0xFF},
++ {WCD938X_TX_3_4_TXFE3_CLKDIV, 0x00},
++ {WCD938X_TX_3_4_SAR4_ERR, 0x00},
++ {WCD938X_TX_3_4_SAR3_ERR, 0x00},
++ {WCD938X_TX_3_4_TEST_BLK_EN2, 0xFB},
++ {WCD938X_TX_3_4_TXFE2_CLKDIV, 0x00},
++ {WCD938X_TX_3_4_SPARE1, 0x00},
++ {WCD938X_TX_3_4_TEST_BLK_EN4, 0xFB},
++ {WCD938X_TX_3_4_TXFE4_CLKDIV, 0x00},
++ {WCD938X_TX_3_4_SPARE2, 0x00},
++ {WCD938X_CLASSH_MODE_1, 0x40},
++ {WCD938X_CLASSH_MODE_2, 0x3A},
++ {WCD938X_CLASSH_MODE_3, 0x00},
++ {WCD938X_CLASSH_CTRL_VCL_1, 0x70},
++ {WCD938X_CLASSH_CTRL_VCL_2, 0x82},
++ {WCD938X_CLASSH_CTRL_CCL_1, 0x31},
++ {WCD938X_CLASSH_CTRL_CCL_2, 0x80},
++ {WCD938X_CLASSH_CTRL_CCL_3, 0x80},
++ {WCD938X_CLASSH_CTRL_CCL_4, 0x51},
++ {WCD938X_CLASSH_CTRL_CCL_5, 0x00},
++ {WCD938X_CLASSH_BUCK_TMUX_A_D, 0x00},
++ {WCD938X_CLASSH_BUCK_SW_DRV_CNTL, 0x77},
++ {WCD938X_CLASSH_SPARE, 0x00},
++ {WCD938X_FLYBACK_EN, 0x4E},
++ {WCD938X_FLYBACK_VNEG_CTRL_1, 0x0B},
++ {WCD938X_FLYBACK_VNEG_CTRL_2, 0x45},
++ {WCD938X_FLYBACK_VNEG_CTRL_3, 0x74},
++ {WCD938X_FLYBACK_VNEG_CTRL_4, 0x7F},
++ {WCD938X_FLYBACK_VNEG_CTRL_5, 0x83},
++ {WCD938X_FLYBACK_VNEG_CTRL_6, 0x98},
++ {WCD938X_FLYBACK_VNEG_CTRL_7, 0xA9},
++ {WCD938X_FLYBACK_VNEG_CTRL_8, 0x68},
++ {WCD938X_FLYBACK_VNEG_CTRL_9, 0x64},
++ {WCD938X_FLYBACK_VNEGDAC_CTRL_1, 0xED},
++ {WCD938X_FLYBACK_VNEGDAC_CTRL_2, 0xF0},
++ {WCD938X_FLYBACK_VNEGDAC_CTRL_3, 0xA6},
++ {WCD938X_FLYBACK_CTRL_1, 0x65},
++ {WCD938X_FLYBACK_TEST_CTL, 0x00},
++ {WCD938X_RX_AUX_SW_CTL, 0x00},
++ {WCD938X_RX_PA_AUX_IN_CONN, 0x01},
++ {WCD938X_RX_TIMER_DIV, 0x32},
++ {WCD938X_RX_OCP_CTL, 0x1F},
++ {WCD938X_RX_OCP_COUNT, 0x77},
++ {WCD938X_RX_BIAS_EAR_DAC, 0xA0},
++ {WCD938X_RX_BIAS_EAR_AMP, 0xAA},
++ {WCD938X_RX_BIAS_HPH_LDO, 0xA9},
++ {WCD938X_RX_BIAS_HPH_PA, 0xAA},
++ {WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2, 0x8A},
++ {WCD938X_RX_BIAS_HPH_RDAC_LDO, 0x88},
++ {WCD938X_RX_BIAS_HPH_CNP1, 0x82},
++ {WCD938X_RX_BIAS_HPH_LOWPOWER, 0x82},
++ {WCD938X_RX_BIAS_AUX_DAC, 0xA0},
++ {WCD938X_RX_BIAS_AUX_AMP, 0xAA},
++ {WCD938X_RX_BIAS_VNEGDAC_BLEEDER, 0x50},
++ {WCD938X_RX_BIAS_MISC, 0x00},
++ {WCD938X_RX_BIAS_BUCK_RST, 0x08},
++ {WCD938X_RX_BIAS_BUCK_VREF_ERRAMP, 0x44},
++ {WCD938X_RX_BIAS_FLYB_ERRAMP, 0x40},
++ {WCD938X_RX_BIAS_FLYB_BUFF, 0xAA},
++ {WCD938X_RX_BIAS_FLYB_MID_RST, 0x14},
++ {WCD938X_HPH_L_STATUS, 0x04},
++ {WCD938X_HPH_R_STATUS, 0x04},
++ {WCD938X_HPH_CNP_EN, 0x80},
++ {WCD938X_HPH_CNP_WG_CTL, 0x9A},
++ {WCD938X_HPH_CNP_WG_TIME, 0x14},
++ {WCD938X_HPH_OCP_CTL, 0x28},
++ {WCD938X_HPH_AUTO_CHOP, 0x16},
++ {WCD938X_HPH_CHOP_CTL, 0x83},
++ {WCD938X_HPH_PA_CTL1, 0x46},
++ {WCD938X_HPH_PA_CTL2, 0x50},
++ {WCD938X_HPH_L_EN, 0x80},
++ {WCD938X_HPH_L_TEST, 0xE0},
++ {WCD938X_HPH_L_ATEST, 0x50},
++ {WCD938X_HPH_R_EN, 0x80},
++ {WCD938X_HPH_R_TEST, 0xE0},
++ {WCD938X_HPH_R_ATEST, 0x54},
++ {WCD938X_HPH_RDAC_CLK_CTL1, 0x99},
++ {WCD938X_HPH_RDAC_CLK_CTL2, 0x9B},
++ {WCD938X_HPH_RDAC_LDO_CTL, 0x33},
++ {WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00},
++ {WCD938X_HPH_REFBUFF_UHQA_CTL, 0x68},
++ {WCD938X_HPH_REFBUFF_LP_CTL, 0x0E},
++ {WCD938X_HPH_L_DAC_CTL, 0x20},
++ {WCD938X_HPH_R_DAC_CTL, 0x20},
++ {WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL, 0x55},
++ {WCD938X_HPH_SURGE_HPHLR_SURGE_EN, 0x19},
++ {WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1, 0xA0},
++ {WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS, 0x00},
++ {WCD938X_EAR_EAR_EN_REG, 0x22},
++ {WCD938X_EAR_EAR_PA_CON, 0x44},
++ {WCD938X_EAR_EAR_SP_CON, 0xDB},
++ {WCD938X_EAR_EAR_DAC_CON, 0x80},
++ {WCD938X_EAR_EAR_CNP_FSM_CON, 0xB2},
++ {WCD938X_EAR_TEST_CTL, 0x00},
++ {WCD938X_EAR_STATUS_REG_1, 0x00},
++ {WCD938X_EAR_STATUS_REG_2, 0x08},
++ {WCD938X_ANA_NEW_PAGE_REGISTER, 0x00},
++ {WCD938X_HPH_NEW_ANA_HPH2, 0x00},
++ {WCD938X_HPH_NEW_ANA_HPH3, 0x00},
++ {WCD938X_SLEEP_CTL, 0x16},
++ {WCD938X_SLEEP_WATCHDOG_CTL, 0x00},
++ {WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL, 0x00},
++ {WCD938X_MBHC_NEW_CTL_1, 0x02},
++ {WCD938X_MBHC_NEW_CTL_2, 0x05},
++ {WCD938X_MBHC_NEW_PLUG_DETECT_CTL, 0xE9},
++ {WCD938X_MBHC_NEW_ZDET_ANA_CTL, 0x0F},
++ {WCD938X_MBHC_NEW_ZDET_RAMP_CTL, 0x00},
++ {WCD938X_MBHC_NEW_FSM_STATUS, 0x00},
++ {WCD938X_MBHC_NEW_ADC_RESULT, 0x00},
++ {WCD938X_TX_NEW_AMIC_MUX_CFG, 0x00},
++ {WCD938X_AUX_AUXPA, 0x00},
++ {WCD938X_LDORXTX_MODE, 0x0C},
++ {WCD938X_LDORXTX_CONFIG, 0x10},
++ {WCD938X_DIE_CRACK_DIE_CRK_DET_EN, 0x00},
++ {WCD938X_DIE_CRACK_DIE_CRK_DET_OUT, 0x00},
++ {WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40},
++ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x81},
++ {WCD938X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10},
++ {WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00},
++ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x81},
++ {WCD938X_HPH_NEW_INT_PA_MISC1, 0x22},
++ {WCD938X_HPH_NEW_INT_PA_MISC2, 0x00},
++ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC, 0x00},
++ {WCD938X_HPH_NEW_INT_HPH_TIMER1, 0xFE},
++ {WCD938X_HPH_NEW_INT_HPH_TIMER2, 0x02},
++ {WCD938X_HPH_NEW_INT_HPH_TIMER3, 0x4E},
++ {WCD938X_HPH_NEW_INT_HPH_TIMER4, 0x54},
++ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00},
++ {WCD938X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00},
++ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW, 0x90},
++ {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW, 0x90},
++ {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI, 0x62},
++ {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP, 0x01},
++ {WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP, 0x11},
++ {WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL, 0x57},
++ {WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL, 0x01},
++ {WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT, 0x00},
++ {WCD938X_MBHC_NEW_INT_SPARE_2, 0x00},
++ {WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON, 0xA8},
++ {WCD938X_EAR_INT_NEW_CNP_VCM_CON1, 0x42},
++ {WCD938X_EAR_INT_NEW_CNP_VCM_CON2, 0x22},
++ {WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS, 0x00},
++ {WCD938X_AUX_INT_EN_REG, 0x00},
++ {WCD938X_AUX_INT_PA_CTRL, 0x06},
++ {WCD938X_AUX_INT_SP_CTRL, 0xD2},
++ {WCD938X_AUX_INT_DAC_CTRL, 0x80},
++ {WCD938X_AUX_INT_CLK_CTRL, 0x50},
++ {WCD938X_AUX_INT_TEST_CTRL, 0x00},
++ {WCD938X_AUX_INT_STATUS_REG, 0x00},
++ {WCD938X_AUX_INT_MISC, 0x00},
++ {WCD938X_LDORXTX_INT_BIAS, 0x6E},
++ {WCD938X_LDORXTX_INT_STB_LOADS_DTEST, 0x50},
++ {WCD938X_LDORXTX_INT_TEST0, 0x1C},
++ {WCD938X_LDORXTX_INT_STARTUP_TIMER, 0xFF},
++ {WCD938X_LDORXTX_INT_TEST1, 0x1F},
++ {WCD938X_LDORXTX_INT_STATUS, 0x00},
++ {WCD938X_SLEEP_INT_WATCHDOG_CTL_1, 0x0A},
++ {WCD938X_SLEEP_INT_WATCHDOG_CTL_2, 0x0A},
++ {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1, 0x02},
++ {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2, 0x60},
++ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2, 0xFF},
++ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1, 0x7F},
++ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0, 0x3F},
++ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M, 0x1F},
++ {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M, 0x0F},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1, 0xD7},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0, 0xC8},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP, 0xC6},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1, 0xD5},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0, 0xCA},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP, 0x05},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0, 0xA5},
++ {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP, 0x13},
++ {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1, 0x88},
++ {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP, 0x42},
++ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L2, 0xFF},
++ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L1, 0x64},
++ {WCD938X_TX_COM_NEW_INT_TXADC_INT_L0, 0x64},
++ {WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP, 0x77},
++ {WCD938X_DIGITAL_PAGE_REGISTER, 0x00},
++ {WCD938X_DIGITAL_CHIP_ID0, 0x00},
++ {WCD938X_DIGITAL_CHIP_ID1, 0x00},
++ {WCD938X_DIGITAL_CHIP_ID2, 0x0D},
++ {WCD938X_DIGITAL_CHIP_ID3, 0x01},
++ {WCD938X_DIGITAL_SWR_TX_CLK_RATE, 0x00},
++ {WCD938X_DIGITAL_CDC_RST_CTL, 0x03},
++ {WCD938X_DIGITAL_TOP_CLK_CFG, 0x00},
++ {WCD938X_DIGITAL_CDC_ANA_CLK_CTL, 0x00},
++ {WCD938X_DIGITAL_CDC_DIG_CLK_CTL, 0xF0},
++ {WCD938X_DIGITAL_SWR_RST_EN, 0x00},
++ {WCD938X_DIGITAL_CDC_PATH_MODE, 0x55},
++ {WCD938X_DIGITAL_CDC_RX_RST, 0x00},
++ {WCD938X_DIGITAL_CDC_RX0_CTL, 0xFC},
++ {WCD938X_DIGITAL_CDC_RX1_CTL, 0xFC},
++ {WCD938X_DIGITAL_CDC_RX2_CTL, 0xFC},
++ {WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1, 0x00},
++ {WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3, 0x00},
++ {WCD938X_DIGITAL_CDC_COMP_CTL_0, 0x00},
++ {WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL, 0x1E},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A1_0, 0x00},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A1_1, 0x01},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A2_0, 0x63},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A2_1, 0x04},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A3_0, 0xAC},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A3_1, 0x04},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A4_0, 0x1A},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A4_1, 0x03},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A5_0, 0xBC},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A5_1, 0x02},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A6_0, 0xC7},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_A7_0, 0xF8},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_C_0, 0x47},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_C_1, 0x43},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_C_2, 0xB1},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_C_3, 0x17},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R1, 0x4D},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R2, 0x29},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R3, 0x34},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R4, 0x59},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R5, 0x66},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R6, 0x87},
++ {WCD938X_DIGITAL_CDC_HPH_DSM_R7, 0x64},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A1_0, 0x00},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A1_1, 0x01},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A2_0, 0x96},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A2_1, 0x09},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A3_0, 0xAB},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A3_1, 0x05},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A4_0, 0x1C},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A4_1, 0x02},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A5_0, 0x17},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A5_1, 0x02},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A6_0, 0xAA},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_A7_0, 0xE3},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_C_0, 0x69},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_C_1, 0x54},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_C_2, 0x02},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_C_3, 0x15},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R1, 0xA4},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R2, 0xB5},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R3, 0x86},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R4, 0x85},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R5, 0xAA},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R6, 0xE2},
++ {WCD938X_DIGITAL_CDC_AUX_DSM_R7, 0x62},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0, 0x55},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1, 0xA9},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0, 0x3D},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1, 0x2E},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2, 0x01},
++ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0, 0x00},
++ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1, 0xFC},
++ {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2, 0x01},
++ {WCD938X_DIGITAL_CDC_HPH_GAIN_CTL, 0x00},
++ {WCD938X_DIGITAL_CDC_AUX_GAIN_CTL, 0x00},
++ {WCD938X_DIGITAL_CDC_EAR_PATH_CTL, 0x00},
++ {WCD938X_DIGITAL_CDC_SWR_CLH, 0x00},
++ {WCD938X_DIGITAL_SWR_CLH_BYP, 0x00},
++ {WCD938X_DIGITAL_CDC_TX0_CTL, 0x68},
++ {WCD938X_DIGITAL_CDC_TX1_CTL, 0x68},
++ {WCD938X_DIGITAL_CDC_TX2_CTL, 0x68},
++ {WCD938X_DIGITAL_CDC_TX_RST, 0x00},
++ {WCD938X_DIGITAL_CDC_REQ_CTL, 0x01},
++ {WCD938X_DIGITAL_CDC_RST, 0x00},
++ {WCD938X_DIGITAL_CDC_AMIC_CTL, 0x0F},
++ {WCD938X_DIGITAL_CDC_DMIC_CTL, 0x04},
++ {WCD938X_DIGITAL_CDC_DMIC1_CTL, 0x01},
++ {WCD938X_DIGITAL_CDC_DMIC2_CTL, 0x01},
++ {WCD938X_DIGITAL_CDC_DMIC3_CTL, 0x01},
++ {WCD938X_DIGITAL_CDC_DMIC4_CTL, 0x01},
++ {WCD938X_DIGITAL_EFUSE_PRG_CTL, 0x00},
++ {WCD938X_DIGITAL_EFUSE_CTL, 0x2B},
++ {WCD938X_DIGITAL_CDC_DMIC_RATE_1_2, 0x11},
++ {WCD938X_DIGITAL_CDC_DMIC_RATE_3_4, 0x11},
++ {WCD938X_DIGITAL_PDM_WD_CTL0, 0x00},
++ {WCD938X_DIGITAL_PDM_WD_CTL1, 0x00},
++ {WCD938X_DIGITAL_PDM_WD_CTL2, 0x00},
++ {WCD938X_DIGITAL_INTR_MODE, 0x00},
++ {WCD938X_DIGITAL_INTR_MASK_0, 0xFF},
++ {WCD938X_DIGITAL_INTR_MASK_1, 0xFF},
++ {WCD938X_DIGITAL_INTR_MASK_2, 0x3F},
++ {WCD938X_DIGITAL_INTR_STATUS_0, 0x00},
++ {WCD938X_DIGITAL_INTR_STATUS_1, 0x00},
++ {WCD938X_DIGITAL_INTR_STATUS_2, 0x00},
++ {WCD938X_DIGITAL_INTR_CLEAR_0, 0x00},
++ {WCD938X_DIGITAL_INTR_CLEAR_1, 0x00},
++ {WCD938X_DIGITAL_INTR_CLEAR_2, 0x00},
++ {WCD938X_DIGITAL_INTR_LEVEL_0, 0x00},
++ {WCD938X_DIGITAL_INTR_LEVEL_1, 0x00},
++ {WCD938X_DIGITAL_INTR_LEVEL_2, 0x00},
++ {WCD938X_DIGITAL_INTR_SET_0, 0x00},
++ {WCD938X_DIGITAL_INTR_SET_1, 0x00},
++ {WCD938X_DIGITAL_INTR_SET_2, 0x00},
++ {WCD938X_DIGITAL_INTR_TEST_0, 0x00},
++ {WCD938X_DIGITAL_INTR_TEST_1, 0x00},
++ {WCD938X_DIGITAL_INTR_TEST_2, 0x00},
++ {WCD938X_DIGITAL_TX_MODE_DBG_EN, 0x00},
++ {WCD938X_DIGITAL_TX_MODE_DBG_0_1, 0x00},
++ {WCD938X_DIGITAL_TX_MODE_DBG_2_3, 0x00},
++ {WCD938X_DIGITAL_LB_IN_SEL_CTL, 0x00},
++ {WCD938X_DIGITAL_LOOP_BACK_MODE, 0x00},
++ {WCD938X_DIGITAL_SWR_DAC_TEST, 0x00},
++ {WCD938X_DIGITAL_SWR_HM_TEST_RX_0, 0x40},
++ {WCD938X_DIGITAL_SWR_HM_TEST_TX_0, 0x40},
++ {WCD938X_DIGITAL_SWR_HM_TEST_RX_1, 0x00},
++ {WCD938X_DIGITAL_SWR_HM_TEST_TX_1, 0x00},
++ {WCD938X_DIGITAL_SWR_HM_TEST_TX_2, 0x00},
++ {WCD938X_DIGITAL_SWR_HM_TEST_0, 0x00},
++ {WCD938X_DIGITAL_SWR_HM_TEST_1, 0x00},
++ {WCD938X_DIGITAL_PAD_CTL_SWR_0, 0x8F},
++ {WCD938X_DIGITAL_PAD_CTL_SWR_1, 0x06},
++ {WCD938X_DIGITAL_I2C_CTL, 0x00},
++ {WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE, 0x00},
++ {WCD938X_DIGITAL_EFUSE_TEST_CTL_0, 0x00},
++ {WCD938X_DIGITAL_EFUSE_TEST_CTL_1, 0x00},
++ {WCD938X_DIGITAL_EFUSE_T_DATA_0, 0x00},
++ {WCD938X_DIGITAL_EFUSE_T_DATA_1, 0x00},
++ {WCD938X_DIGITAL_PAD_CTL_PDM_RX0, 0xF1},
++ {WCD938X_DIGITAL_PAD_CTL_PDM_RX1, 0xF1},
++ {WCD938X_DIGITAL_PAD_CTL_PDM_TX0, 0xF1},
++ {WCD938X_DIGITAL_PAD_CTL_PDM_TX1, 0xF1},
++ {WCD938X_DIGITAL_PAD_CTL_PDM_TX2, 0xF1},
++ {WCD938X_DIGITAL_PAD_INP_DIS_0, 0x00},
++ {WCD938X_DIGITAL_PAD_INP_DIS_1, 0x00},
++ {WCD938X_DIGITAL_DRIVE_STRENGTH_0, 0x00},
++ {WCD938X_DIGITAL_DRIVE_STRENGTH_1, 0x00},
++ {WCD938X_DIGITAL_DRIVE_STRENGTH_2, 0x00},
++ {WCD938X_DIGITAL_RX_DATA_EDGE_CTL, 0x1F},
++ {WCD938X_DIGITAL_TX_DATA_EDGE_CTL, 0x80},
++ {WCD938X_DIGITAL_GPIO_MODE, 0x00},
++ {WCD938X_DIGITAL_PIN_CTL_OE, 0x00},
++ {WCD938X_DIGITAL_PIN_CTL_DATA_0, 0x00},
++ {WCD938X_DIGITAL_PIN_CTL_DATA_1, 0x00},
++ {WCD938X_DIGITAL_PIN_STATUS_0, 0x00},
++ {WCD938X_DIGITAL_PIN_STATUS_1, 0x00},
++ {WCD938X_DIGITAL_DIG_DEBUG_CTL, 0x00},
++ {WCD938X_DIGITAL_DIG_DEBUG_EN, 0x00},
++ {WCD938X_DIGITAL_ANA_CSR_DBG_ADD, 0x00},
++ {WCD938X_DIGITAL_ANA_CSR_DBG_CTL, 0x48},
++ {WCD938X_DIGITAL_SSP_DBG, 0x00},
++ {WCD938X_DIGITAL_MODE_STATUS_0, 0x00},
++ {WCD938X_DIGITAL_MODE_STATUS_1, 0x00},
++ {WCD938X_DIGITAL_SPARE_0, 0x00},
++ {WCD938X_DIGITAL_SPARE_1, 0x00},
++ {WCD938X_DIGITAL_SPARE_2, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_0, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_1, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_2, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_3, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_4, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_5, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_6, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_7, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_8, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_9, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_10, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_11, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_12, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_13, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_14, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_15, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_16, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_17, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_18, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_19, 0xFF},
++ {WCD938X_DIGITAL_EFUSE_REG_20, 0x0E},
++ {WCD938X_DIGITAL_EFUSE_REG_21, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_22, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_23, 0xF8},
++ {WCD938X_DIGITAL_EFUSE_REG_24, 0x16},
++ {WCD938X_DIGITAL_EFUSE_REG_25, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_26, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_27, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_28, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_29, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_30, 0x00},
++ {WCD938X_DIGITAL_EFUSE_REG_31, 0x00},
++ {WCD938X_DIGITAL_TX_REQ_FB_CTL_0, 0x88},
++ {WCD938X_DIGITAL_TX_REQ_FB_CTL_1, 0x88},
++ {WCD938X_DIGITAL_TX_REQ_FB_CTL_2, 0x88},
++ {WCD938X_DIGITAL_TX_REQ_FB_CTL_3, 0x88},
++ {WCD938X_DIGITAL_TX_REQ_FB_CTL_4, 0x88},
++ {WCD938X_DIGITAL_DEM_BYPASS_DATA0, 0x55},
++ {WCD938X_DIGITAL_DEM_BYPASS_DATA1, 0x55},
++ {WCD938X_DIGITAL_DEM_BYPASS_DATA2, 0x55},
++ {WCD938X_DIGITAL_DEM_BYPASS_DATA3, 0x01},
++};
++
++static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case WCD938X_ANA_PAGE_REGISTER:
++ case WCD938X_ANA_BIAS:
++ case WCD938X_ANA_RX_SUPPLIES:
++ case WCD938X_ANA_HPH:
++ case WCD938X_ANA_EAR:
++ case WCD938X_ANA_EAR_COMPANDER_CTL:
++ case WCD938X_ANA_TX_CH1:
++ case WCD938X_ANA_TX_CH2:
++ case WCD938X_ANA_TX_CH3:
++ case WCD938X_ANA_TX_CH4:
++ case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
++ case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
++ case WCD938X_ANA_MBHC_MECH:
++ case WCD938X_ANA_MBHC_ELECT:
++ case WCD938X_ANA_MBHC_ZDET:
++ case WCD938X_ANA_MBHC_BTN0:
++ case WCD938X_ANA_MBHC_BTN1:
++ case WCD938X_ANA_MBHC_BTN2:
++ case WCD938X_ANA_MBHC_BTN3:
++ case WCD938X_ANA_MBHC_BTN4:
++ case WCD938X_ANA_MBHC_BTN5:
++ case WCD938X_ANA_MBHC_BTN6:
++ case WCD938X_ANA_MBHC_BTN7:
++ case WCD938X_ANA_MICB1:
++ case WCD938X_ANA_MICB2:
++ case WCD938X_ANA_MICB2_RAMP:
++ case WCD938X_ANA_MICB3:
++ case WCD938X_ANA_MICB4:
++ case WCD938X_BIAS_CTL:
++ case WCD938X_BIAS_VBG_FINE_ADJ:
++ case WCD938X_LDOL_VDDCX_ADJUST:
++ case WCD938X_LDOL_DISABLE_LDOL:
++ case WCD938X_MBHC_CTL_CLK:
++ case WCD938X_MBHC_CTL_ANA:
++ case WCD938X_MBHC_CTL_SPARE_1:
++ case WCD938X_MBHC_CTL_SPARE_2:
++ case WCD938X_MBHC_CTL_BCS:
++ case WCD938X_MBHC_TEST_CTL:
++ case WCD938X_LDOH_MODE:
++ case WCD938X_LDOH_BIAS:
++ case WCD938X_LDOH_STB_LOADS:
++ case WCD938X_LDOH_SLOWRAMP:
++ case WCD938X_MICB1_TEST_CTL_1:
++ case WCD938X_MICB1_TEST_CTL_2:
++ case WCD938X_MICB1_TEST_CTL_3:
++ case WCD938X_MICB2_TEST_CTL_1:
++ case WCD938X_MICB2_TEST_CTL_2:
++ case WCD938X_MICB2_TEST_CTL_3:
++ case WCD938X_MICB3_TEST_CTL_1:
++ case WCD938X_MICB3_TEST_CTL_2:
++ case WCD938X_MICB3_TEST_CTL_3:
++ case WCD938X_MICB4_TEST_CTL_1:
++ case WCD938X_MICB4_TEST_CTL_2:
++ case WCD938X_MICB4_TEST_CTL_3:
++ case WCD938X_TX_COM_ADC_VCM:
++ case WCD938X_TX_COM_BIAS_ATEST:
++ case WCD938X_TX_COM_SPARE1:
++ case WCD938X_TX_COM_SPARE2:
++ case WCD938X_TX_COM_TXFE_DIV_CTL:
++ case WCD938X_TX_COM_TXFE_DIV_START:
++ case WCD938X_TX_COM_SPARE3:
++ case WCD938X_TX_COM_SPARE4:
++ case WCD938X_TX_1_2_TEST_EN:
++ case WCD938X_TX_1_2_ADC_IB:
++ case WCD938X_TX_1_2_ATEST_REFCTL:
++ case WCD938X_TX_1_2_TEST_CTL:
++ case WCD938X_TX_1_2_TEST_BLK_EN1:
++ case WCD938X_TX_1_2_TXFE1_CLKDIV:
++ case WCD938X_TX_3_4_TEST_EN:
++ case WCD938X_TX_3_4_ADC_IB:
++ case WCD938X_TX_3_4_ATEST_REFCTL:
++ case WCD938X_TX_3_4_TEST_CTL:
++ case WCD938X_TX_3_4_TEST_BLK_EN3:
++ case WCD938X_TX_3_4_TXFE3_CLKDIV:
++ case WCD938X_TX_3_4_TEST_BLK_EN2:
++ case WCD938X_TX_3_4_TXFE2_CLKDIV:
++ case WCD938X_TX_3_4_SPARE1:
++ case WCD938X_TX_3_4_TEST_BLK_EN4:
++ case WCD938X_TX_3_4_TXFE4_CLKDIV:
++ case WCD938X_TX_3_4_SPARE2:
++ case WCD938X_CLASSH_MODE_1:
++ case WCD938X_CLASSH_MODE_2:
++ case WCD938X_CLASSH_MODE_3:
++ case WCD938X_CLASSH_CTRL_VCL_1:
++ case WCD938X_CLASSH_CTRL_VCL_2:
++ case WCD938X_CLASSH_CTRL_CCL_1:
++ case WCD938X_CLASSH_CTRL_CCL_2:
++ case WCD938X_CLASSH_CTRL_CCL_3:
++ case WCD938X_CLASSH_CTRL_CCL_4:
++ case WCD938X_CLASSH_CTRL_CCL_5:
++ case WCD938X_CLASSH_BUCK_TMUX_A_D:
++ case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
++ case WCD938X_CLASSH_SPARE:
++ case WCD938X_FLYBACK_EN:
++ case WCD938X_FLYBACK_VNEG_CTRL_1:
++ case WCD938X_FLYBACK_VNEG_CTRL_2:
++ case WCD938X_FLYBACK_VNEG_CTRL_3:
++ case WCD938X_FLYBACK_VNEG_CTRL_4:
++ case WCD938X_FLYBACK_VNEG_CTRL_5:
++ case WCD938X_FLYBACK_VNEG_CTRL_6:
++ case WCD938X_FLYBACK_VNEG_CTRL_7:
++ case WCD938X_FLYBACK_VNEG_CTRL_8:
++ case WCD938X_FLYBACK_VNEG_CTRL_9:
++ case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
++ case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
++ case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
++ case WCD938X_FLYBACK_CTRL_1:
++ case WCD938X_FLYBACK_TEST_CTL:
++ case WCD938X_RX_AUX_SW_CTL:
++ case WCD938X_RX_PA_AUX_IN_CONN:
++ case WCD938X_RX_TIMER_DIV:
++ case WCD938X_RX_OCP_CTL:
++ case WCD938X_RX_OCP_COUNT:
++ case WCD938X_RX_BIAS_EAR_DAC:
++ case WCD938X_RX_BIAS_EAR_AMP:
++ case WCD938X_RX_BIAS_HPH_LDO:
++ case WCD938X_RX_BIAS_HPH_PA:
++ case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
++ case WCD938X_RX_BIAS_HPH_RDAC_LDO:
++ case WCD938X_RX_BIAS_HPH_CNP1:
++ case WCD938X_RX_BIAS_HPH_LOWPOWER:
++ case WCD938X_RX_BIAS_AUX_DAC:
++ case WCD938X_RX_BIAS_AUX_AMP:
++ case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
++ case WCD938X_RX_BIAS_MISC:
++ case WCD938X_RX_BIAS_BUCK_RST:
++ case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
++ case WCD938X_RX_BIAS_FLYB_ERRAMP:
++ case WCD938X_RX_BIAS_FLYB_BUFF:
++ case WCD938X_RX_BIAS_FLYB_MID_RST:
++ case WCD938X_HPH_CNP_EN:
++ case WCD938X_HPH_CNP_WG_CTL:
++ case WCD938X_HPH_CNP_WG_TIME:
++ case WCD938X_HPH_OCP_CTL:
++ case WCD938X_HPH_AUTO_CHOP:
++ case WCD938X_HPH_CHOP_CTL:
++ case WCD938X_HPH_PA_CTL1:
++ case WCD938X_HPH_PA_CTL2:
++ case WCD938X_HPH_L_EN:
++ case WCD938X_HPH_L_TEST:
++ case WCD938X_HPH_L_ATEST:
++ case WCD938X_HPH_R_EN:
++ case WCD938X_HPH_R_TEST:
++ case WCD938X_HPH_R_ATEST:
++ case WCD938X_HPH_RDAC_CLK_CTL1:
++ case WCD938X_HPH_RDAC_CLK_CTL2:
++ case WCD938X_HPH_RDAC_LDO_CTL:
++ case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
++ case WCD938X_HPH_REFBUFF_UHQA_CTL:
++ case WCD938X_HPH_REFBUFF_LP_CTL:
++ case WCD938X_HPH_L_DAC_CTL:
++ case WCD938X_HPH_R_DAC_CTL:
++ case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
++ case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
++ case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
++ case WCD938X_EAR_EAR_EN_REG:
++ case WCD938X_EAR_EAR_PA_CON:
++ case WCD938X_EAR_EAR_SP_CON:
++ case WCD938X_EAR_EAR_DAC_CON:
++ case WCD938X_EAR_EAR_CNP_FSM_CON:
++ case WCD938X_EAR_TEST_CTL:
++ case WCD938X_ANA_NEW_PAGE_REGISTER:
++ case WCD938X_HPH_NEW_ANA_HPH2:
++ case WCD938X_HPH_NEW_ANA_HPH3:
++ case WCD938X_SLEEP_CTL:
++ case WCD938X_SLEEP_WATCHDOG_CTL:
++ case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
++ case WCD938X_MBHC_NEW_CTL_1:
++ case WCD938X_MBHC_NEW_CTL_2:
++ case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
++ case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
++ case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
++ case WCD938X_TX_NEW_AMIC_MUX_CFG:
++ case WCD938X_AUX_AUXPA:
++ case WCD938X_LDORXTX_MODE:
++ case WCD938X_LDORXTX_CONFIG:
++ case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
++ case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
++ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
++ case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
++ case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
++ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
++ case WCD938X_HPH_NEW_INT_PA_MISC1:
++ case WCD938X_HPH_NEW_INT_PA_MISC2:
++ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
++ case WCD938X_HPH_NEW_INT_HPH_TIMER1:
++ case WCD938X_HPH_NEW_INT_HPH_TIMER2:
++ case WCD938X_HPH_NEW_INT_HPH_TIMER3:
++ case WCD938X_HPH_NEW_INT_HPH_TIMER4:
++ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
++ case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
++ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
++ case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
++ case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
++ case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
++ case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
++ case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
++ case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
++ case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
++ case WCD938X_MBHC_NEW_INT_SPARE_2:
++ case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
++ case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
++ case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
++ case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
++ case WCD938X_AUX_INT_EN_REG:
++ case WCD938X_AUX_INT_PA_CTRL:
++ case WCD938X_AUX_INT_SP_CTRL:
++ case WCD938X_AUX_INT_DAC_CTRL:
++ case WCD938X_AUX_INT_CLK_CTRL:
++ case WCD938X_AUX_INT_TEST_CTRL:
++ case WCD938X_AUX_INT_MISC:
++ case WCD938X_LDORXTX_INT_BIAS:
++ case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
++ case WCD938X_LDORXTX_INT_TEST0:
++ case WCD938X_LDORXTX_INT_STARTUP_TIMER:
++ case WCD938X_LDORXTX_INT_TEST1:
++ case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
++ case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
++ case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
++ case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
++ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
++ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
++ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
++ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
++ case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
++ case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
++ case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
++ case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
++ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
++ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
++ case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
++ case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
++ case WCD938X_DIGITAL_PAGE_REGISTER:
++ case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
++ case WCD938X_DIGITAL_CDC_RST_CTL:
++ case WCD938X_DIGITAL_TOP_CLK_CFG:
++ case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
++ case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
++ case WCD938X_DIGITAL_SWR_RST_EN:
++ case WCD938X_DIGITAL_CDC_PATH_MODE:
++ case WCD938X_DIGITAL_CDC_RX_RST:
++ case WCD938X_DIGITAL_CDC_RX0_CTL:
++ case WCD938X_DIGITAL_CDC_RX1_CTL:
++ case WCD938X_DIGITAL_CDC_RX2_CTL:
++ case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
++ case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
++ case WCD938X_DIGITAL_CDC_COMP_CTL_0:
++ case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
++ case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
++ case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
++ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
++ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
++ case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
++ case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
++ case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
++ case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
++ case WCD938X_DIGITAL_CDC_SWR_CLH:
++ case WCD938X_DIGITAL_SWR_CLH_BYP:
++ case WCD938X_DIGITAL_CDC_TX0_CTL:
++ case WCD938X_DIGITAL_CDC_TX1_CTL:
++ case WCD938X_DIGITAL_CDC_TX2_CTL:
++ case WCD938X_DIGITAL_CDC_TX_RST:
++ case WCD938X_DIGITAL_CDC_REQ_CTL:
++ case WCD938X_DIGITAL_CDC_RST:
++ case WCD938X_DIGITAL_CDC_AMIC_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC1_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC2_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC3_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC4_CTL:
++ case WCD938X_DIGITAL_EFUSE_PRG_CTL:
++ case WCD938X_DIGITAL_EFUSE_CTL:
++ case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
++ case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
++ case WCD938X_DIGITAL_PDM_WD_CTL0:
++ case WCD938X_DIGITAL_PDM_WD_CTL1:
++ case WCD938X_DIGITAL_PDM_WD_CTL2:
++ case WCD938X_DIGITAL_INTR_MODE:
++ case WCD938X_DIGITAL_INTR_MASK_0:
++ case WCD938X_DIGITAL_INTR_MASK_1:
++ case WCD938X_DIGITAL_INTR_MASK_2:
++ case WCD938X_DIGITAL_INTR_CLEAR_0:
++ case WCD938X_DIGITAL_INTR_CLEAR_1:
++ case WCD938X_DIGITAL_INTR_CLEAR_2:
++ case WCD938X_DIGITAL_INTR_LEVEL_0:
++ case WCD938X_DIGITAL_INTR_LEVEL_1:
++ case WCD938X_DIGITAL_INTR_LEVEL_2:
++ case WCD938X_DIGITAL_INTR_SET_0:
++ case WCD938X_DIGITAL_INTR_SET_1:
++ case WCD938X_DIGITAL_INTR_SET_2:
++ case WCD938X_DIGITAL_INTR_TEST_0:
++ case WCD938X_DIGITAL_INTR_TEST_1:
++ case WCD938X_DIGITAL_INTR_TEST_2:
++ case WCD938X_DIGITAL_TX_MODE_DBG_EN:
++ case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
++ case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
++ case WCD938X_DIGITAL_LB_IN_SEL_CTL:
++ case WCD938X_DIGITAL_LOOP_BACK_MODE:
++ case WCD938X_DIGITAL_SWR_DAC_TEST:
++ case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
++ case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
++ case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
++ case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
++ case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
++ case WCD938X_DIGITAL_PAD_CTL_SWR_0:
++ case WCD938X_DIGITAL_PAD_CTL_SWR_1:
++ case WCD938X_DIGITAL_I2C_CTL:
++ case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
++ case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
++ case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
++ case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
++ case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
++ case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
++ case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
++ case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
++ case WCD938X_DIGITAL_PAD_INP_DIS_0:
++ case WCD938X_DIGITAL_PAD_INP_DIS_1:
++ case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
++ case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
++ case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
++ case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
++ case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
++ case WCD938X_DIGITAL_GPIO_MODE:
++ case WCD938X_DIGITAL_PIN_CTL_OE:
++ case WCD938X_DIGITAL_PIN_CTL_DATA_0:
++ case WCD938X_DIGITAL_PIN_CTL_DATA_1:
++ case WCD938X_DIGITAL_DIG_DEBUG_CTL:
++ case WCD938X_DIGITAL_DIG_DEBUG_EN:
++ case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
++ case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
++ case WCD938X_DIGITAL_SSP_DBG:
++ case WCD938X_DIGITAL_SPARE_0:
++ case WCD938X_DIGITAL_SPARE_1:
++ case WCD938X_DIGITAL_SPARE_2:
++ case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
++ case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
++ case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
++ case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
++ case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
++ case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
++ case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
++ case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
++ case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
++ return true;
++ }
++
++ return false;
++}
++
++static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case WCD938X_ANA_MBHC_RESULT_1:
++ case WCD938X_ANA_MBHC_RESULT_2:
++ case WCD938X_ANA_MBHC_RESULT_3:
++ case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
++ case WCD938X_TX_1_2_SAR2_ERR:
++ case WCD938X_TX_1_2_SAR1_ERR:
++ case WCD938X_TX_3_4_SAR4_ERR:
++ case WCD938X_TX_3_4_SAR3_ERR:
++ case WCD938X_HPH_L_STATUS:
++ case WCD938X_HPH_R_STATUS:
++ case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
++ case WCD938X_EAR_STATUS_REG_1:
++ case WCD938X_EAR_STATUS_REG_2:
++ case WCD938X_MBHC_NEW_FSM_STATUS:
++ case WCD938X_MBHC_NEW_ADC_RESULT:
++ case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
++ case WCD938X_AUX_INT_STATUS_REG:
++ case WCD938X_LDORXTX_INT_STATUS:
++ case WCD938X_DIGITAL_CHIP_ID0:
++ case WCD938X_DIGITAL_CHIP_ID1:
++ case WCD938X_DIGITAL_CHIP_ID2:
++ case WCD938X_DIGITAL_CHIP_ID3:
++ case WCD938X_DIGITAL_INTR_STATUS_0:
++ case WCD938X_DIGITAL_INTR_STATUS_1:
++ case WCD938X_DIGITAL_INTR_STATUS_2:
++ case WCD938X_DIGITAL_INTR_CLEAR_0:
++ case WCD938X_DIGITAL_INTR_CLEAR_1:
++ case WCD938X_DIGITAL_INTR_CLEAR_2:
++ case WCD938X_DIGITAL_SWR_HM_TEST_0:
++ case WCD938X_DIGITAL_SWR_HM_TEST_1:
++ case WCD938X_DIGITAL_EFUSE_T_DATA_0:
++ case WCD938X_DIGITAL_EFUSE_T_DATA_1:
++ case WCD938X_DIGITAL_PIN_STATUS_0:
++ case WCD938X_DIGITAL_PIN_STATUS_1:
++ case WCD938X_DIGITAL_MODE_STATUS_0:
++ case WCD938X_DIGITAL_MODE_STATUS_1:
++ case WCD938X_DIGITAL_EFUSE_REG_0:
++ case WCD938X_DIGITAL_EFUSE_REG_1:
++ case WCD938X_DIGITAL_EFUSE_REG_2:
++ case WCD938X_DIGITAL_EFUSE_REG_3:
++ case WCD938X_DIGITAL_EFUSE_REG_4:
++ case WCD938X_DIGITAL_EFUSE_REG_5:
++ case WCD938X_DIGITAL_EFUSE_REG_6:
++ case WCD938X_DIGITAL_EFUSE_REG_7:
++ case WCD938X_DIGITAL_EFUSE_REG_8:
++ case WCD938X_DIGITAL_EFUSE_REG_9:
++ case WCD938X_DIGITAL_EFUSE_REG_10:
++ case WCD938X_DIGITAL_EFUSE_REG_11:
++ case WCD938X_DIGITAL_EFUSE_REG_12:
++ case WCD938X_DIGITAL_EFUSE_REG_13:
++ case WCD938X_DIGITAL_EFUSE_REG_14:
++ case WCD938X_DIGITAL_EFUSE_REG_15:
++ case WCD938X_DIGITAL_EFUSE_REG_16:
++ case WCD938X_DIGITAL_EFUSE_REG_17:
++ case WCD938X_DIGITAL_EFUSE_REG_18:
++ case WCD938X_DIGITAL_EFUSE_REG_19:
++ case WCD938X_DIGITAL_EFUSE_REG_20:
++ case WCD938X_DIGITAL_EFUSE_REG_21:
++ case WCD938X_DIGITAL_EFUSE_REG_22:
++ case WCD938X_DIGITAL_EFUSE_REG_23:
++ case WCD938X_DIGITAL_EFUSE_REG_24:
++ case WCD938X_DIGITAL_EFUSE_REG_25:
++ case WCD938X_DIGITAL_EFUSE_REG_26:
++ case WCD938X_DIGITAL_EFUSE_REG_27:
++ case WCD938X_DIGITAL_EFUSE_REG_28:
++ case WCD938X_DIGITAL_EFUSE_REG_29:
++ case WCD938X_DIGITAL_EFUSE_REG_30:
++ case WCD938X_DIGITAL_EFUSE_REG_31:
++ return true;
++ }
++ return false;
++}
++
++static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
++{
++ bool ret;
++
++ ret = wcd938x_readonly_register(dev, reg);
++ if (!ret)
++ return wcd938x_rdwr_register(dev, reg);
++
++ return ret;
++}
++
++static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
++{
++ return wcd938x_rdwr_register(dev, reg);
++}
++
++static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
++{
++ if (reg <= WCD938X_BASE_ADDRESS)
++ return false;
++
++ if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
++ return true;
++
++ if (wcd938x_readonly_register(dev, reg))
++ return true;
++
++ return false;
++}
++
++static const struct regmap_config wcd938x_regmap_config = {
++ .name = "wcd938x_csr",
++ .reg_bits = 32,
++ .val_bits = 8,
++ .cache_type = REGCACHE_RBTREE,
++ .reg_defaults = wcd938x_defaults,
++ .num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
++ .max_register = WCD938X_MAX_REGISTER,
++ .readable_reg = wcd938x_readable_register,
++ .writeable_reg = wcd938x_writeable_register,
++ .volatile_reg = wcd938x_volatile_register,
++ .can_multi_write = true,
++};
++
+ static const struct sdw_slave_ops wcd9380_slave_ops = {
+ .update_status = wcd9380_update_status,
+ .interrupt_callback = wcd9380_interrupt_callback,
+@@ -261,6 +1263,16 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ }
+
++ if (wcd->is_tx) {
++ wcd->regmap = devm_regmap_init_sdw(pdev, &wcd938x_regmap_config);
++ if (IS_ERR(wcd->regmap))
++ return dev_err_probe(dev, PTR_ERR(wcd->regmap),
++ "Regmap init failed\n");
++
++ /* Start in cache-only until device is enumerated */
++ regcache_cache_only(wcd->regmap, true);
++ };
++
+ pm_runtime_set_autosuspend_delay(dev, 3000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+@@ -278,22 +1290,23 @@ MODULE_DEVICE_TABLE(sdw, wcd9380_slave_id);
+
+ static int __maybe_unused wcd938x_sdw_runtime_suspend(struct device *dev)
+ {
+- struct regmap *regmap = dev_get_regmap(dev, NULL);
++ struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+
+- if (regmap) {
+- regcache_cache_only(regmap, true);
+- regcache_mark_dirty(regmap);
++ if (wcd->regmap) {
++ regcache_cache_only(wcd->regmap, true);
++ regcache_mark_dirty(wcd->regmap);
+ }
++
+ return 0;
+ }
+
+ static int __maybe_unused wcd938x_sdw_runtime_resume(struct device *dev)
+ {
+- struct regmap *regmap = dev_get_regmap(dev, NULL);
++ struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+
+- if (regmap) {
+- regcache_cache_only(regmap, false);
+- regcache_sync(regmap);
++ if (wcd->regmap) {
++ regcache_cache_only(wcd->regmap, false);
++ regcache_sync(wcd->regmap);
+ }
+
+ pm_runtime_mark_last_busy(dev);
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index aca06a4026f3e..1d801a7b1469d 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -273,1001 +273,6 @@ static struct wcd_mbhc_field wcd_mbhc_fields[WCD_MBHC_REG_FUNC_MAX] = {
+ WCD_MBHC_FIELD(WCD_MBHC_ELECT_ISRC_EN, WCD938X_ANA_MBHC_ZDET, 0x02),
+ };
+
+-static const struct reg_default wcd938x_defaults[] = {
+- {WCD938X_ANA_PAGE_REGISTER, 0x00},
+- {WCD938X_ANA_BIAS, 0x00},
+- {WCD938X_ANA_RX_SUPPLIES, 0x00},
+- {WCD938X_ANA_HPH, 0x0C},
+- {WCD938X_ANA_EAR, 0x00},
+- {WCD938X_ANA_EAR_COMPANDER_CTL, 0x02},
+- {WCD938X_ANA_TX_CH1, 0x20},
+- {WCD938X_ANA_TX_CH2, 0x00},
+- {WCD938X_ANA_TX_CH3, 0x20},
+- {WCD938X_ANA_TX_CH4, 0x00},
+- {WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC, 0x00},
+- {WCD938X_ANA_MICB3_DSP_EN_LOGIC, 0x00},
+- {WCD938X_ANA_MBHC_MECH, 0x39},
+- {WCD938X_ANA_MBHC_ELECT, 0x08},
+- {WCD938X_ANA_MBHC_ZDET, 0x00},
+- {WCD938X_ANA_MBHC_RESULT_1, 0x00},
+- {WCD938X_ANA_MBHC_RESULT_2, 0x00},
+- {WCD938X_ANA_MBHC_RESULT_3, 0x00},
+- {WCD938X_ANA_MBHC_BTN0, 0x00},
+- {WCD938X_ANA_MBHC_BTN1, 0x10},
+- {WCD938X_ANA_MBHC_BTN2, 0x20},
+- {WCD938X_ANA_MBHC_BTN3, 0x30},
+- {WCD938X_ANA_MBHC_BTN4, 0x40},
+- {WCD938X_ANA_MBHC_BTN5, 0x50},
+- {WCD938X_ANA_MBHC_BTN6, 0x60},
+- {WCD938X_ANA_MBHC_BTN7, 0x70},
+- {WCD938X_ANA_MICB1, 0x10},
+- {WCD938X_ANA_MICB2, 0x10},
+- {WCD938X_ANA_MICB2_RAMP, 0x00},
+- {WCD938X_ANA_MICB3, 0x10},
+- {WCD938X_ANA_MICB4, 0x10},
+- {WCD938X_BIAS_CTL, 0x2A},
+- {WCD938X_BIAS_VBG_FINE_ADJ, 0x55},
+- {WCD938X_LDOL_VDDCX_ADJUST, 0x01},
+- {WCD938X_LDOL_DISABLE_LDOL, 0x00},
+- {WCD938X_MBHC_CTL_CLK, 0x00},
+- {WCD938X_MBHC_CTL_ANA, 0x00},
+- {WCD938X_MBHC_CTL_SPARE_1, 0x00},
+- {WCD938X_MBHC_CTL_SPARE_2, 0x00},
+- {WCD938X_MBHC_CTL_BCS, 0x00},
+- {WCD938X_MBHC_MOISTURE_DET_FSM_STATUS, 0x00},
+- {WCD938X_MBHC_TEST_CTL, 0x00},
+- {WCD938X_LDOH_MODE, 0x2B},
+- {WCD938X_LDOH_BIAS, 0x68},
+- {WCD938X_LDOH_STB_LOADS, 0x00},
+- {WCD938X_LDOH_SLOWRAMP, 0x50},
+- {WCD938X_MICB1_TEST_CTL_1, 0x1A},
+- {WCD938X_MICB1_TEST_CTL_2, 0x00},
+- {WCD938X_MICB1_TEST_CTL_3, 0xA4},
+- {WCD938X_MICB2_TEST_CTL_1, 0x1A},
+- {WCD938X_MICB2_TEST_CTL_2, 0x00},
+- {WCD938X_MICB2_TEST_CTL_3, 0x24},
+- {WCD938X_MICB3_TEST_CTL_1, 0x1A},
+- {WCD938X_MICB3_TEST_CTL_2, 0x00},
+- {WCD938X_MICB3_TEST_CTL_3, 0xA4},
+- {WCD938X_MICB4_TEST_CTL_1, 0x1A},
+- {WCD938X_MICB4_TEST_CTL_2, 0x00},
+- {WCD938X_MICB4_TEST_CTL_3, 0xA4},
+- {WCD938X_TX_COM_ADC_VCM, 0x39},
+- {WCD938X_TX_COM_BIAS_ATEST, 0xE0},
+- {WCD938X_TX_COM_SPARE1, 0x00},
+- {WCD938X_TX_COM_SPARE2, 0x00},
+- {WCD938X_TX_COM_TXFE_DIV_CTL, 0x22},
+- {WCD938X_TX_COM_TXFE_DIV_START, 0x00},
+- {WCD938X_TX_COM_SPARE3, 0x00},
+- {WCD938X_TX_COM_SPARE4, 0x00},
+- {WCD938X_TX_1_2_TEST_EN, 0xCC},
+- {WCD938X_TX_1_2_ADC_IB, 0xE9},
+- {WCD938X_TX_1_2_ATEST_REFCTL, 0x0A},
+- {WCD938X_TX_1_2_TEST_CTL, 0x38},
+- {WCD938X_TX_1_2_TEST_BLK_EN1, 0xFF},
+- {WCD938X_TX_1_2_TXFE1_CLKDIV, 0x00},
+- {WCD938X_TX_1_2_SAR2_ERR, 0x00},
+- {WCD938X_TX_1_2_SAR1_ERR, 0x00},
+- {WCD938X_TX_3_4_TEST_EN, 0xCC},
+- {WCD938X_TX_3_4_ADC_IB, 0xE9},
+- {WCD938X_TX_3_4_ATEST_REFCTL, 0x0A},
+- {WCD938X_TX_3_4_TEST_CTL, 0x38},
+- {WCD938X_TX_3_4_TEST_BLK_EN3, 0xFF},
+- {WCD938X_TX_3_4_TXFE3_CLKDIV, 0x00},
+- {WCD938X_TX_3_4_SAR4_ERR, 0x00},
+- {WCD938X_TX_3_4_SAR3_ERR, 0x00},
+- {WCD938X_TX_3_4_TEST_BLK_EN2, 0xFB},
+- {WCD938X_TX_3_4_TXFE2_CLKDIV, 0x00},
+- {WCD938X_TX_3_4_SPARE1, 0x00},
+- {WCD938X_TX_3_4_TEST_BLK_EN4, 0xFB},
+- {WCD938X_TX_3_4_TXFE4_CLKDIV, 0x00},
+- {WCD938X_TX_3_4_SPARE2, 0x00},
+- {WCD938X_CLASSH_MODE_1, 0x40},
+- {WCD938X_CLASSH_MODE_2, 0x3A},
+- {WCD938X_CLASSH_MODE_3, 0x00},
+- {WCD938X_CLASSH_CTRL_VCL_1, 0x70},
+- {WCD938X_CLASSH_CTRL_VCL_2, 0x82},
+- {WCD938X_CLASSH_CTRL_CCL_1, 0x31},
+- {WCD938X_CLASSH_CTRL_CCL_2, 0x80},
+- {WCD938X_CLASSH_CTRL_CCL_3, 0x80},
+- {WCD938X_CLASSH_CTRL_CCL_4, 0x51},
+- {WCD938X_CLASSH_CTRL_CCL_5, 0x00},
+- {WCD938X_CLASSH_BUCK_TMUX_A_D, 0x00},
+- {WCD938X_CLASSH_BUCK_SW_DRV_CNTL, 0x77},
+- {WCD938X_CLASSH_SPARE, 0x00},
+- {WCD938X_FLYBACK_EN, 0x4E},
+- {WCD938X_FLYBACK_VNEG_CTRL_1, 0x0B},
+- {WCD938X_FLYBACK_VNEG_CTRL_2, 0x45},
+- {WCD938X_FLYBACK_VNEG_CTRL_3, 0x74},
+- {WCD938X_FLYBACK_VNEG_CTRL_4, 0x7F},
+- {WCD938X_FLYBACK_VNEG_CTRL_5, 0x83},
+- {WCD938X_FLYBACK_VNEG_CTRL_6, 0x98},
+- {WCD938X_FLYBACK_VNEG_CTRL_7, 0xA9},
+- {WCD938X_FLYBACK_VNEG_CTRL_8, 0x68},
+- {WCD938X_FLYBACK_VNEG_CTRL_9, 0x64},
+- {WCD938X_FLYBACK_VNEGDAC_CTRL_1, 0xED},
+- {WCD938X_FLYBACK_VNEGDAC_CTRL_2, 0xF0},
+- {WCD938X_FLYBACK_VNEGDAC_CTRL_3, 0xA6},
+- {WCD938X_FLYBACK_CTRL_1, 0x65},
+- {WCD938X_FLYBACK_TEST_CTL, 0x00},
+- {WCD938X_RX_AUX_SW_CTL, 0x00},
+- {WCD938X_RX_PA_AUX_IN_CONN, 0x01},
+- {WCD938X_RX_TIMER_DIV, 0x32},
+- {WCD938X_RX_OCP_CTL, 0x1F},
+- {WCD938X_RX_OCP_COUNT, 0x77},
+- {WCD938X_RX_BIAS_EAR_DAC, 0xA0},
+- {WCD938X_RX_BIAS_EAR_AMP, 0xAA},
+- {WCD938X_RX_BIAS_HPH_LDO, 0xA9},
+- {WCD938X_RX_BIAS_HPH_PA, 0xAA},
+- {WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2, 0x8A},
+- {WCD938X_RX_BIAS_HPH_RDAC_LDO, 0x88},
+- {WCD938X_RX_BIAS_HPH_CNP1, 0x82},
+- {WCD938X_RX_BIAS_HPH_LOWPOWER, 0x82},
+- {WCD938X_RX_BIAS_AUX_DAC, 0xA0},
+- {WCD938X_RX_BIAS_AUX_AMP, 0xAA},
+- {WCD938X_RX_BIAS_VNEGDAC_BLEEDER, 0x50},
+- {WCD938X_RX_BIAS_MISC, 0x00},
+- {WCD938X_RX_BIAS_BUCK_RST, 0x08},
+- {WCD938X_RX_BIAS_BUCK_VREF_ERRAMP, 0x44},
+- {WCD938X_RX_BIAS_FLYB_ERRAMP, 0x40},
+- {WCD938X_RX_BIAS_FLYB_BUFF, 0xAA},
+- {WCD938X_RX_BIAS_FLYB_MID_RST, 0x14},
+- {WCD938X_HPH_L_STATUS, 0x04},
+- {WCD938X_HPH_R_STATUS, 0x04},
+- {WCD938X_HPH_CNP_EN, 0x80},
+- {WCD938X_HPH_CNP_WG_CTL, 0x9A},
+- {WCD938X_HPH_CNP_WG_TIME, 0x14},
+- {WCD938X_HPH_OCP_CTL, 0x28},
+- {WCD938X_HPH_AUTO_CHOP, 0x16},
+- {WCD938X_HPH_CHOP_CTL, 0x83},
+- {WCD938X_HPH_PA_CTL1, 0x46},
+- {WCD938X_HPH_PA_CTL2, 0x50},
+- {WCD938X_HPH_L_EN, 0x80},
+- {WCD938X_HPH_L_TEST, 0xE0},
+- {WCD938X_HPH_L_ATEST, 0x50},
+- {WCD938X_HPH_R_EN, 0x80},
+- {WCD938X_HPH_R_TEST, 0xE0},
+- {WCD938X_HPH_R_ATEST, 0x54},
+- {WCD938X_HPH_RDAC_CLK_CTL1, 0x99},
+- {WCD938X_HPH_RDAC_CLK_CTL2, 0x9B},
+- {WCD938X_HPH_RDAC_LDO_CTL, 0x33},
+- {WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00},
+- {WCD938X_HPH_REFBUFF_UHQA_CTL, 0x68},
+- {WCD938X_HPH_REFBUFF_LP_CTL, 0x0E},
+- {WCD938X_HPH_L_DAC_CTL, 0x20},
+- {WCD938X_HPH_R_DAC_CTL, 0x20},
+- {WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL, 0x55},
+- {WCD938X_HPH_SURGE_HPHLR_SURGE_EN, 0x19},
+- {WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1, 0xA0},
+- {WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS, 0x00},
+- {WCD938X_EAR_EAR_EN_REG, 0x22},
+- {WCD938X_EAR_EAR_PA_CON, 0x44},
+- {WCD938X_EAR_EAR_SP_CON, 0xDB},
+- {WCD938X_EAR_EAR_DAC_CON, 0x80},
+- {WCD938X_EAR_EAR_CNP_FSM_CON, 0xB2},
+- {WCD938X_EAR_TEST_CTL, 0x00},
+- {WCD938X_EAR_STATUS_REG_1, 0x00},
+- {WCD938X_EAR_STATUS_REG_2, 0x08},
+- {WCD938X_ANA_NEW_PAGE_REGISTER, 0x00},
+- {WCD938X_HPH_NEW_ANA_HPH2, 0x00},
+- {WCD938X_HPH_NEW_ANA_HPH3, 0x00},
+- {WCD938X_SLEEP_CTL, 0x16},
+- {WCD938X_SLEEP_WATCHDOG_CTL, 0x00},
+- {WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL, 0x00},
+- {WCD938X_MBHC_NEW_CTL_1, 0x02},
+- {WCD938X_MBHC_NEW_CTL_2, 0x05},
+- {WCD938X_MBHC_NEW_PLUG_DETECT_CTL, 0xE9},
+- {WCD938X_MBHC_NEW_ZDET_ANA_CTL, 0x0F},
+- {WCD938X_MBHC_NEW_ZDET_RAMP_CTL, 0x00},
+- {WCD938X_MBHC_NEW_FSM_STATUS, 0x00},
+- {WCD938X_MBHC_NEW_ADC_RESULT, 0x00},
+- {WCD938X_TX_NEW_AMIC_MUX_CFG, 0x00},
+- {WCD938X_AUX_AUXPA, 0x00},
+- {WCD938X_LDORXTX_MODE, 0x0C},
+- {WCD938X_LDORXTX_CONFIG, 0x10},
+- {WCD938X_DIE_CRACK_DIE_CRK_DET_EN, 0x00},
+- {WCD938X_DIE_CRACK_DIE_CRK_DET_OUT, 0x00},
+- {WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40},
+- {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x81},
+- {WCD938X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10},
+- {WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00},
+- {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x81},
+- {WCD938X_HPH_NEW_INT_PA_MISC1, 0x22},
+- {WCD938X_HPH_NEW_INT_PA_MISC2, 0x00},
+- {WCD938X_HPH_NEW_INT_PA_RDAC_MISC, 0x00},
+- {WCD938X_HPH_NEW_INT_HPH_TIMER1, 0xFE},
+- {WCD938X_HPH_NEW_INT_HPH_TIMER2, 0x02},
+- {WCD938X_HPH_NEW_INT_HPH_TIMER3, 0x4E},
+- {WCD938X_HPH_NEW_INT_HPH_TIMER4, 0x54},
+- {WCD938X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00},
+- {WCD938X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00},
+- {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW, 0x90},
+- {WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW, 0x90},
+- {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI, 0x62},
+- {WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP, 0x01},
+- {WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP, 0x11},
+- {WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL, 0x57},
+- {WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL, 0x01},
+- {WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT, 0x00},
+- {WCD938X_MBHC_NEW_INT_SPARE_2, 0x00},
+- {WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON, 0xA8},
+- {WCD938X_EAR_INT_NEW_CNP_VCM_CON1, 0x42},
+- {WCD938X_EAR_INT_NEW_CNP_VCM_CON2, 0x22},
+- {WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS, 0x00},
+- {WCD938X_AUX_INT_EN_REG, 0x00},
+- {WCD938X_AUX_INT_PA_CTRL, 0x06},
+- {WCD938X_AUX_INT_SP_CTRL, 0xD2},
+- {WCD938X_AUX_INT_DAC_CTRL, 0x80},
+- {WCD938X_AUX_INT_CLK_CTRL, 0x50},
+- {WCD938X_AUX_INT_TEST_CTRL, 0x00},
+- {WCD938X_AUX_INT_STATUS_REG, 0x00},
+- {WCD938X_AUX_INT_MISC, 0x00},
+- {WCD938X_LDORXTX_INT_BIAS, 0x6E},
+- {WCD938X_LDORXTX_INT_STB_LOADS_DTEST, 0x50},
+- {WCD938X_LDORXTX_INT_TEST0, 0x1C},
+- {WCD938X_LDORXTX_INT_STARTUP_TIMER, 0xFF},
+- {WCD938X_LDORXTX_INT_TEST1, 0x1F},
+- {WCD938X_LDORXTX_INT_STATUS, 0x00},
+- {WCD938X_SLEEP_INT_WATCHDOG_CTL_1, 0x0A},
+- {WCD938X_SLEEP_INT_WATCHDOG_CTL_2, 0x0A},
+- {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1, 0x02},
+- {WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2, 0x60},
+- {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2, 0xFF},
+- {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1, 0x7F},
+- {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0, 0x3F},
+- {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M, 0x1F},
+- {WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M, 0x0F},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1, 0xD7},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0, 0xC8},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP, 0xC6},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1, 0xD5},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0, 0xCA},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP, 0x05},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0, 0xA5},
+- {WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP, 0x13},
+- {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1, 0x88},
+- {WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP, 0x42},
+- {WCD938X_TX_COM_NEW_INT_TXADC_INT_L2, 0xFF},
+- {WCD938X_TX_COM_NEW_INT_TXADC_INT_L1, 0x64},
+- {WCD938X_TX_COM_NEW_INT_TXADC_INT_L0, 0x64},
+- {WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP, 0x77},
+- {WCD938X_DIGITAL_PAGE_REGISTER, 0x00},
+- {WCD938X_DIGITAL_CHIP_ID0, 0x00},
+- {WCD938X_DIGITAL_CHIP_ID1, 0x00},
+- {WCD938X_DIGITAL_CHIP_ID2, 0x0D},
+- {WCD938X_DIGITAL_CHIP_ID3, 0x01},
+- {WCD938X_DIGITAL_SWR_TX_CLK_RATE, 0x00},
+- {WCD938X_DIGITAL_CDC_RST_CTL, 0x03},
+- {WCD938X_DIGITAL_TOP_CLK_CFG, 0x00},
+- {WCD938X_DIGITAL_CDC_ANA_CLK_CTL, 0x00},
+- {WCD938X_DIGITAL_CDC_DIG_CLK_CTL, 0xF0},
+- {WCD938X_DIGITAL_SWR_RST_EN, 0x00},
+- {WCD938X_DIGITAL_CDC_PATH_MODE, 0x55},
+- {WCD938X_DIGITAL_CDC_RX_RST, 0x00},
+- {WCD938X_DIGITAL_CDC_RX0_CTL, 0xFC},
+- {WCD938X_DIGITAL_CDC_RX1_CTL, 0xFC},
+- {WCD938X_DIGITAL_CDC_RX2_CTL, 0xFC},
+- {WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1, 0x00},
+- {WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3, 0x00},
+- {WCD938X_DIGITAL_CDC_COMP_CTL_0, 0x00},
+- {WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL, 0x1E},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A1_0, 0x00},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A1_1, 0x01},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A2_0, 0x63},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A2_1, 0x04},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A3_0, 0xAC},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A3_1, 0x04},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A4_0, 0x1A},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A4_1, 0x03},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A5_0, 0xBC},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A5_1, 0x02},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A6_0, 0xC7},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_A7_0, 0xF8},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_C_0, 0x47},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_C_1, 0x43},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_C_2, 0xB1},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_C_3, 0x17},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R1, 0x4D},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R2, 0x29},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R3, 0x34},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R4, 0x59},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R5, 0x66},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R6, 0x87},
+- {WCD938X_DIGITAL_CDC_HPH_DSM_R7, 0x64},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A1_0, 0x00},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A1_1, 0x01},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A2_0, 0x96},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A2_1, 0x09},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A3_0, 0xAB},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A3_1, 0x05},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A4_0, 0x1C},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A4_1, 0x02},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A5_0, 0x17},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A5_1, 0x02},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A6_0, 0xAA},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_A7_0, 0xE3},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_C_0, 0x69},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_C_1, 0x54},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_C_2, 0x02},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_C_3, 0x15},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R1, 0xA4},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R2, 0xB5},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R3, 0x86},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R4, 0x85},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R5, 0xAA},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R6, 0xE2},
+- {WCD938X_DIGITAL_CDC_AUX_DSM_R7, 0x62},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0, 0x55},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1, 0xA9},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0, 0x3D},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1, 0x2E},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2, 0x01},
+- {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0, 0x00},
+- {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1, 0xFC},
+- {WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2, 0x01},
+- {WCD938X_DIGITAL_CDC_HPH_GAIN_CTL, 0x00},
+- {WCD938X_DIGITAL_CDC_AUX_GAIN_CTL, 0x00},
+- {WCD938X_DIGITAL_CDC_EAR_PATH_CTL, 0x00},
+- {WCD938X_DIGITAL_CDC_SWR_CLH, 0x00},
+- {WCD938X_DIGITAL_SWR_CLH_BYP, 0x00},
+- {WCD938X_DIGITAL_CDC_TX0_CTL, 0x68},
+- {WCD938X_DIGITAL_CDC_TX1_CTL, 0x68},
+- {WCD938X_DIGITAL_CDC_TX2_CTL, 0x68},
+- {WCD938X_DIGITAL_CDC_TX_RST, 0x00},
+- {WCD938X_DIGITAL_CDC_REQ_CTL, 0x01},
+- {WCD938X_DIGITAL_CDC_RST, 0x00},
+- {WCD938X_DIGITAL_CDC_AMIC_CTL, 0x0F},
+- {WCD938X_DIGITAL_CDC_DMIC_CTL, 0x04},
+- {WCD938X_DIGITAL_CDC_DMIC1_CTL, 0x01},
+- {WCD938X_DIGITAL_CDC_DMIC2_CTL, 0x01},
+- {WCD938X_DIGITAL_CDC_DMIC3_CTL, 0x01},
+- {WCD938X_DIGITAL_CDC_DMIC4_CTL, 0x01},
+- {WCD938X_DIGITAL_EFUSE_PRG_CTL, 0x00},
+- {WCD938X_DIGITAL_EFUSE_CTL, 0x2B},
+- {WCD938X_DIGITAL_CDC_DMIC_RATE_1_2, 0x11},
+- {WCD938X_DIGITAL_CDC_DMIC_RATE_3_4, 0x11},
+- {WCD938X_DIGITAL_PDM_WD_CTL0, 0x00},
+- {WCD938X_DIGITAL_PDM_WD_CTL1, 0x00},
+- {WCD938X_DIGITAL_PDM_WD_CTL2, 0x00},
+- {WCD938X_DIGITAL_INTR_MODE, 0x00},
+- {WCD938X_DIGITAL_INTR_MASK_0, 0xFF},
+- {WCD938X_DIGITAL_INTR_MASK_1, 0xFF},
+- {WCD938X_DIGITAL_INTR_MASK_2, 0x3F},
+- {WCD938X_DIGITAL_INTR_STATUS_0, 0x00},
+- {WCD938X_DIGITAL_INTR_STATUS_1, 0x00},
+- {WCD938X_DIGITAL_INTR_STATUS_2, 0x00},
+- {WCD938X_DIGITAL_INTR_CLEAR_0, 0x00},
+- {WCD938X_DIGITAL_INTR_CLEAR_1, 0x00},
+- {WCD938X_DIGITAL_INTR_CLEAR_2, 0x00},
+- {WCD938X_DIGITAL_INTR_LEVEL_0, 0x00},
+- {WCD938X_DIGITAL_INTR_LEVEL_1, 0x00},
+- {WCD938X_DIGITAL_INTR_LEVEL_2, 0x00},
+- {WCD938X_DIGITAL_INTR_SET_0, 0x00},
+- {WCD938X_DIGITAL_INTR_SET_1, 0x00},
+- {WCD938X_DIGITAL_INTR_SET_2, 0x00},
+- {WCD938X_DIGITAL_INTR_TEST_0, 0x00},
+- {WCD938X_DIGITAL_INTR_TEST_1, 0x00},
+- {WCD938X_DIGITAL_INTR_TEST_2, 0x00},
+- {WCD938X_DIGITAL_TX_MODE_DBG_EN, 0x00},
+- {WCD938X_DIGITAL_TX_MODE_DBG_0_1, 0x00},
+- {WCD938X_DIGITAL_TX_MODE_DBG_2_3, 0x00},
+- {WCD938X_DIGITAL_LB_IN_SEL_CTL, 0x00},
+- {WCD938X_DIGITAL_LOOP_BACK_MODE, 0x00},
+- {WCD938X_DIGITAL_SWR_DAC_TEST, 0x00},
+- {WCD938X_DIGITAL_SWR_HM_TEST_RX_0, 0x40},
+- {WCD938X_DIGITAL_SWR_HM_TEST_TX_0, 0x40},
+- {WCD938X_DIGITAL_SWR_HM_TEST_RX_1, 0x00},
+- {WCD938X_DIGITAL_SWR_HM_TEST_TX_1, 0x00},
+- {WCD938X_DIGITAL_SWR_HM_TEST_TX_2, 0x00},
+- {WCD938X_DIGITAL_SWR_HM_TEST_0, 0x00},
+- {WCD938X_DIGITAL_SWR_HM_TEST_1, 0x00},
+- {WCD938X_DIGITAL_PAD_CTL_SWR_0, 0x8F},
+- {WCD938X_DIGITAL_PAD_CTL_SWR_1, 0x06},
+- {WCD938X_DIGITAL_I2C_CTL, 0x00},
+- {WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE, 0x00},
+- {WCD938X_DIGITAL_EFUSE_TEST_CTL_0, 0x00},
+- {WCD938X_DIGITAL_EFUSE_TEST_CTL_1, 0x00},
+- {WCD938X_DIGITAL_EFUSE_T_DATA_0, 0x00},
+- {WCD938X_DIGITAL_EFUSE_T_DATA_1, 0x00},
+- {WCD938X_DIGITAL_PAD_CTL_PDM_RX0, 0xF1},
+- {WCD938X_DIGITAL_PAD_CTL_PDM_RX1, 0xF1},
+- {WCD938X_DIGITAL_PAD_CTL_PDM_TX0, 0xF1},
+- {WCD938X_DIGITAL_PAD_CTL_PDM_TX1, 0xF1},
+- {WCD938X_DIGITAL_PAD_CTL_PDM_TX2, 0xF1},
+- {WCD938X_DIGITAL_PAD_INP_DIS_0, 0x00},
+- {WCD938X_DIGITAL_PAD_INP_DIS_1, 0x00},
+- {WCD938X_DIGITAL_DRIVE_STRENGTH_0, 0x00},
+- {WCD938X_DIGITAL_DRIVE_STRENGTH_1, 0x00},
+- {WCD938X_DIGITAL_DRIVE_STRENGTH_2, 0x00},
+- {WCD938X_DIGITAL_RX_DATA_EDGE_CTL, 0x1F},
+- {WCD938X_DIGITAL_TX_DATA_EDGE_CTL, 0x80},
+- {WCD938X_DIGITAL_GPIO_MODE, 0x00},
+- {WCD938X_DIGITAL_PIN_CTL_OE, 0x00},
+- {WCD938X_DIGITAL_PIN_CTL_DATA_0, 0x00},
+- {WCD938X_DIGITAL_PIN_CTL_DATA_1, 0x00},
+- {WCD938X_DIGITAL_PIN_STATUS_0, 0x00},
+- {WCD938X_DIGITAL_PIN_STATUS_1, 0x00},
+- {WCD938X_DIGITAL_DIG_DEBUG_CTL, 0x00},
+- {WCD938X_DIGITAL_DIG_DEBUG_EN, 0x00},
+- {WCD938X_DIGITAL_ANA_CSR_DBG_ADD, 0x00},
+- {WCD938X_DIGITAL_ANA_CSR_DBG_CTL, 0x48},
+- {WCD938X_DIGITAL_SSP_DBG, 0x00},
+- {WCD938X_DIGITAL_MODE_STATUS_0, 0x00},
+- {WCD938X_DIGITAL_MODE_STATUS_1, 0x00},
+- {WCD938X_DIGITAL_SPARE_0, 0x00},
+- {WCD938X_DIGITAL_SPARE_1, 0x00},
+- {WCD938X_DIGITAL_SPARE_2, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_0, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_1, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_2, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_3, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_4, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_5, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_6, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_7, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_8, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_9, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_10, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_11, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_12, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_13, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_14, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_15, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_16, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_17, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_18, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_19, 0xFF},
+- {WCD938X_DIGITAL_EFUSE_REG_20, 0x0E},
+- {WCD938X_DIGITAL_EFUSE_REG_21, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_22, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_23, 0xF8},
+- {WCD938X_DIGITAL_EFUSE_REG_24, 0x16},
+- {WCD938X_DIGITAL_EFUSE_REG_25, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_26, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_27, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_28, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_29, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_30, 0x00},
+- {WCD938X_DIGITAL_EFUSE_REG_31, 0x00},
+- {WCD938X_DIGITAL_TX_REQ_FB_CTL_0, 0x88},
+- {WCD938X_DIGITAL_TX_REQ_FB_CTL_1, 0x88},
+- {WCD938X_DIGITAL_TX_REQ_FB_CTL_2, 0x88},
+- {WCD938X_DIGITAL_TX_REQ_FB_CTL_3, 0x88},
+- {WCD938X_DIGITAL_TX_REQ_FB_CTL_4, 0x88},
+- {WCD938X_DIGITAL_DEM_BYPASS_DATA0, 0x55},
+- {WCD938X_DIGITAL_DEM_BYPASS_DATA1, 0x55},
+- {WCD938X_DIGITAL_DEM_BYPASS_DATA2, 0x55},
+- {WCD938X_DIGITAL_DEM_BYPASS_DATA3, 0x01},
+-};
+-
+-static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
+-{
+- switch (reg) {
+- case WCD938X_ANA_PAGE_REGISTER:
+- case WCD938X_ANA_BIAS:
+- case WCD938X_ANA_RX_SUPPLIES:
+- case WCD938X_ANA_HPH:
+- case WCD938X_ANA_EAR:
+- case WCD938X_ANA_EAR_COMPANDER_CTL:
+- case WCD938X_ANA_TX_CH1:
+- case WCD938X_ANA_TX_CH2:
+- case WCD938X_ANA_TX_CH3:
+- case WCD938X_ANA_TX_CH4:
+- case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
+- case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
+- case WCD938X_ANA_MBHC_MECH:
+- case WCD938X_ANA_MBHC_ELECT:
+- case WCD938X_ANA_MBHC_ZDET:
+- case WCD938X_ANA_MBHC_BTN0:
+- case WCD938X_ANA_MBHC_BTN1:
+- case WCD938X_ANA_MBHC_BTN2:
+- case WCD938X_ANA_MBHC_BTN3:
+- case WCD938X_ANA_MBHC_BTN4:
+- case WCD938X_ANA_MBHC_BTN5:
+- case WCD938X_ANA_MBHC_BTN6:
+- case WCD938X_ANA_MBHC_BTN7:
+- case WCD938X_ANA_MICB1:
+- case WCD938X_ANA_MICB2:
+- case WCD938X_ANA_MICB2_RAMP:
+- case WCD938X_ANA_MICB3:
+- case WCD938X_ANA_MICB4:
+- case WCD938X_BIAS_CTL:
+- case WCD938X_BIAS_VBG_FINE_ADJ:
+- case WCD938X_LDOL_VDDCX_ADJUST:
+- case WCD938X_LDOL_DISABLE_LDOL:
+- case WCD938X_MBHC_CTL_CLK:
+- case WCD938X_MBHC_CTL_ANA:
+- case WCD938X_MBHC_CTL_SPARE_1:
+- case WCD938X_MBHC_CTL_SPARE_2:
+- case WCD938X_MBHC_CTL_BCS:
+- case WCD938X_MBHC_TEST_CTL:
+- case WCD938X_LDOH_MODE:
+- case WCD938X_LDOH_BIAS:
+- case WCD938X_LDOH_STB_LOADS:
+- case WCD938X_LDOH_SLOWRAMP:
+- case WCD938X_MICB1_TEST_CTL_1:
+- case WCD938X_MICB1_TEST_CTL_2:
+- case WCD938X_MICB1_TEST_CTL_3:
+- case WCD938X_MICB2_TEST_CTL_1:
+- case WCD938X_MICB2_TEST_CTL_2:
+- case WCD938X_MICB2_TEST_CTL_3:
+- case WCD938X_MICB3_TEST_CTL_1:
+- case WCD938X_MICB3_TEST_CTL_2:
+- case WCD938X_MICB3_TEST_CTL_3:
+- case WCD938X_MICB4_TEST_CTL_1:
+- case WCD938X_MICB4_TEST_CTL_2:
+- case WCD938X_MICB4_TEST_CTL_3:
+- case WCD938X_TX_COM_ADC_VCM:
+- case WCD938X_TX_COM_BIAS_ATEST:
+- case WCD938X_TX_COM_SPARE1:
+- case WCD938X_TX_COM_SPARE2:
+- case WCD938X_TX_COM_TXFE_DIV_CTL:
+- case WCD938X_TX_COM_TXFE_DIV_START:
+- case WCD938X_TX_COM_SPARE3:
+- case WCD938X_TX_COM_SPARE4:
+- case WCD938X_TX_1_2_TEST_EN:
+- case WCD938X_TX_1_2_ADC_IB:
+- case WCD938X_TX_1_2_ATEST_REFCTL:
+- case WCD938X_TX_1_2_TEST_CTL:
+- case WCD938X_TX_1_2_TEST_BLK_EN1:
+- case WCD938X_TX_1_2_TXFE1_CLKDIV:
+- case WCD938X_TX_3_4_TEST_EN:
+- case WCD938X_TX_3_4_ADC_IB:
+- case WCD938X_TX_3_4_ATEST_REFCTL:
+- case WCD938X_TX_3_4_TEST_CTL:
+- case WCD938X_TX_3_4_TEST_BLK_EN3:
+- case WCD938X_TX_3_4_TXFE3_CLKDIV:
+- case WCD938X_TX_3_4_TEST_BLK_EN2:
+- case WCD938X_TX_3_4_TXFE2_CLKDIV:
+- case WCD938X_TX_3_4_SPARE1:
+- case WCD938X_TX_3_4_TEST_BLK_EN4:
+- case WCD938X_TX_3_4_TXFE4_CLKDIV:
+- case WCD938X_TX_3_4_SPARE2:
+- case WCD938X_CLASSH_MODE_1:
+- case WCD938X_CLASSH_MODE_2:
+- case WCD938X_CLASSH_MODE_3:
+- case WCD938X_CLASSH_CTRL_VCL_1:
+- case WCD938X_CLASSH_CTRL_VCL_2:
+- case WCD938X_CLASSH_CTRL_CCL_1:
+- case WCD938X_CLASSH_CTRL_CCL_2:
+- case WCD938X_CLASSH_CTRL_CCL_3:
+- case WCD938X_CLASSH_CTRL_CCL_4:
+- case WCD938X_CLASSH_CTRL_CCL_5:
+- case WCD938X_CLASSH_BUCK_TMUX_A_D:
+- case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
+- case WCD938X_CLASSH_SPARE:
+- case WCD938X_FLYBACK_EN:
+- case WCD938X_FLYBACK_VNEG_CTRL_1:
+- case WCD938X_FLYBACK_VNEG_CTRL_2:
+- case WCD938X_FLYBACK_VNEG_CTRL_3:
+- case WCD938X_FLYBACK_VNEG_CTRL_4:
+- case WCD938X_FLYBACK_VNEG_CTRL_5:
+- case WCD938X_FLYBACK_VNEG_CTRL_6:
+- case WCD938X_FLYBACK_VNEG_CTRL_7:
+- case WCD938X_FLYBACK_VNEG_CTRL_8:
+- case WCD938X_FLYBACK_VNEG_CTRL_9:
+- case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
+- case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
+- case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
+- case WCD938X_FLYBACK_CTRL_1:
+- case WCD938X_FLYBACK_TEST_CTL:
+- case WCD938X_RX_AUX_SW_CTL:
+- case WCD938X_RX_PA_AUX_IN_CONN:
+- case WCD938X_RX_TIMER_DIV:
+- case WCD938X_RX_OCP_CTL:
+- case WCD938X_RX_OCP_COUNT:
+- case WCD938X_RX_BIAS_EAR_DAC:
+- case WCD938X_RX_BIAS_EAR_AMP:
+- case WCD938X_RX_BIAS_HPH_LDO:
+- case WCD938X_RX_BIAS_HPH_PA:
+- case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
+- case WCD938X_RX_BIAS_HPH_RDAC_LDO:
+- case WCD938X_RX_BIAS_HPH_CNP1:
+- case WCD938X_RX_BIAS_HPH_LOWPOWER:
+- case WCD938X_RX_BIAS_AUX_DAC:
+- case WCD938X_RX_BIAS_AUX_AMP:
+- case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
+- case WCD938X_RX_BIAS_MISC:
+- case WCD938X_RX_BIAS_BUCK_RST:
+- case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
+- case WCD938X_RX_BIAS_FLYB_ERRAMP:
+- case WCD938X_RX_BIAS_FLYB_BUFF:
+- case WCD938X_RX_BIAS_FLYB_MID_RST:
+- case WCD938X_HPH_CNP_EN:
+- case WCD938X_HPH_CNP_WG_CTL:
+- case WCD938X_HPH_CNP_WG_TIME:
+- case WCD938X_HPH_OCP_CTL:
+- case WCD938X_HPH_AUTO_CHOP:
+- case WCD938X_HPH_CHOP_CTL:
+- case WCD938X_HPH_PA_CTL1:
+- case WCD938X_HPH_PA_CTL2:
+- case WCD938X_HPH_L_EN:
+- case WCD938X_HPH_L_TEST:
+- case WCD938X_HPH_L_ATEST:
+- case WCD938X_HPH_R_EN:
+- case WCD938X_HPH_R_TEST:
+- case WCD938X_HPH_R_ATEST:
+- case WCD938X_HPH_RDAC_CLK_CTL1:
+- case WCD938X_HPH_RDAC_CLK_CTL2:
+- case WCD938X_HPH_RDAC_LDO_CTL:
+- case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
+- case WCD938X_HPH_REFBUFF_UHQA_CTL:
+- case WCD938X_HPH_REFBUFF_LP_CTL:
+- case WCD938X_HPH_L_DAC_CTL:
+- case WCD938X_HPH_R_DAC_CTL:
+- case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
+- case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
+- case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
+- case WCD938X_EAR_EAR_EN_REG:
+- case WCD938X_EAR_EAR_PA_CON:
+- case WCD938X_EAR_EAR_SP_CON:
+- case WCD938X_EAR_EAR_DAC_CON:
+- case WCD938X_EAR_EAR_CNP_FSM_CON:
+- case WCD938X_EAR_TEST_CTL:
+- case WCD938X_ANA_NEW_PAGE_REGISTER:
+- case WCD938X_HPH_NEW_ANA_HPH2:
+- case WCD938X_HPH_NEW_ANA_HPH3:
+- case WCD938X_SLEEP_CTL:
+- case WCD938X_SLEEP_WATCHDOG_CTL:
+- case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
+- case WCD938X_MBHC_NEW_CTL_1:
+- case WCD938X_MBHC_NEW_CTL_2:
+- case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
+- case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
+- case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
+- case WCD938X_TX_NEW_AMIC_MUX_CFG:
+- case WCD938X_AUX_AUXPA:
+- case WCD938X_LDORXTX_MODE:
+- case WCD938X_LDORXTX_CONFIG:
+- case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
+- case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
+- case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
+- case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
+- case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
+- case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
+- case WCD938X_HPH_NEW_INT_PA_MISC1:
+- case WCD938X_HPH_NEW_INT_PA_MISC2:
+- case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
+- case WCD938X_HPH_NEW_INT_HPH_TIMER1:
+- case WCD938X_HPH_NEW_INT_HPH_TIMER2:
+- case WCD938X_HPH_NEW_INT_HPH_TIMER3:
+- case WCD938X_HPH_NEW_INT_HPH_TIMER4:
+- case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
+- case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
+- case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
+- case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
+- case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
+- case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
+- case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
+- case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
+- case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
+- case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
+- case WCD938X_MBHC_NEW_INT_SPARE_2:
+- case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
+- case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
+- case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
+- case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
+- case WCD938X_AUX_INT_EN_REG:
+- case WCD938X_AUX_INT_PA_CTRL:
+- case WCD938X_AUX_INT_SP_CTRL:
+- case WCD938X_AUX_INT_DAC_CTRL:
+- case WCD938X_AUX_INT_CLK_CTRL:
+- case WCD938X_AUX_INT_TEST_CTRL:
+- case WCD938X_AUX_INT_MISC:
+- case WCD938X_LDORXTX_INT_BIAS:
+- case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
+- case WCD938X_LDORXTX_INT_TEST0:
+- case WCD938X_LDORXTX_INT_STARTUP_TIMER:
+- case WCD938X_LDORXTX_INT_TEST1:
+- case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
+- case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
+- case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
+- case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
+- case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
+- case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
+- case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
+- case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
+- case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
+- case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
+- case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
+- case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
+- case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
+- case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
+- case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
+- case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
+- case WCD938X_DIGITAL_PAGE_REGISTER:
+- case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
+- case WCD938X_DIGITAL_CDC_RST_CTL:
+- case WCD938X_DIGITAL_TOP_CLK_CFG:
+- case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
+- case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
+- case WCD938X_DIGITAL_SWR_RST_EN:
+- case WCD938X_DIGITAL_CDC_PATH_MODE:
+- case WCD938X_DIGITAL_CDC_RX_RST:
+- case WCD938X_DIGITAL_CDC_RX0_CTL:
+- case WCD938X_DIGITAL_CDC_RX1_CTL:
+- case WCD938X_DIGITAL_CDC_RX2_CTL:
+- case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
+- case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
+- case WCD938X_DIGITAL_CDC_COMP_CTL_0:
+- case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
+- case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
+- case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
+- case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
+- case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
+- case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
+- case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
+- case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
+- case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
+- case WCD938X_DIGITAL_CDC_SWR_CLH:
+- case WCD938X_DIGITAL_SWR_CLH_BYP:
+- case WCD938X_DIGITAL_CDC_TX0_CTL:
+- case WCD938X_DIGITAL_CDC_TX1_CTL:
+- case WCD938X_DIGITAL_CDC_TX2_CTL:
+- case WCD938X_DIGITAL_CDC_TX_RST:
+- case WCD938X_DIGITAL_CDC_REQ_CTL:
+- case WCD938X_DIGITAL_CDC_RST:
+- case WCD938X_DIGITAL_CDC_AMIC_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC1_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC2_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC3_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC4_CTL:
+- case WCD938X_DIGITAL_EFUSE_PRG_CTL:
+- case WCD938X_DIGITAL_EFUSE_CTL:
+- case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
+- case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
+- case WCD938X_DIGITAL_PDM_WD_CTL0:
+- case WCD938X_DIGITAL_PDM_WD_CTL1:
+- case WCD938X_DIGITAL_PDM_WD_CTL2:
+- case WCD938X_DIGITAL_INTR_MODE:
+- case WCD938X_DIGITAL_INTR_MASK_0:
+- case WCD938X_DIGITAL_INTR_MASK_1:
+- case WCD938X_DIGITAL_INTR_MASK_2:
+- case WCD938X_DIGITAL_INTR_CLEAR_0:
+- case WCD938X_DIGITAL_INTR_CLEAR_1:
+- case WCD938X_DIGITAL_INTR_CLEAR_2:
+- case WCD938X_DIGITAL_INTR_LEVEL_0:
+- case WCD938X_DIGITAL_INTR_LEVEL_1:
+- case WCD938X_DIGITAL_INTR_LEVEL_2:
+- case WCD938X_DIGITAL_INTR_SET_0:
+- case WCD938X_DIGITAL_INTR_SET_1:
+- case WCD938X_DIGITAL_INTR_SET_2:
+- case WCD938X_DIGITAL_INTR_TEST_0:
+- case WCD938X_DIGITAL_INTR_TEST_1:
+- case WCD938X_DIGITAL_INTR_TEST_2:
+- case WCD938X_DIGITAL_TX_MODE_DBG_EN:
+- case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
+- case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
+- case WCD938X_DIGITAL_LB_IN_SEL_CTL:
+- case WCD938X_DIGITAL_LOOP_BACK_MODE:
+- case WCD938X_DIGITAL_SWR_DAC_TEST:
+- case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
+- case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
+- case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
+- case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
+- case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
+- case WCD938X_DIGITAL_PAD_CTL_SWR_0:
+- case WCD938X_DIGITAL_PAD_CTL_SWR_1:
+- case WCD938X_DIGITAL_I2C_CTL:
+- case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
+- case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
+- case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
+- case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
+- case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
+- case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
+- case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
+- case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
+- case WCD938X_DIGITAL_PAD_INP_DIS_0:
+- case WCD938X_DIGITAL_PAD_INP_DIS_1:
+- case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
+- case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
+- case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
+- case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
+- case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
+- case WCD938X_DIGITAL_GPIO_MODE:
+- case WCD938X_DIGITAL_PIN_CTL_OE:
+- case WCD938X_DIGITAL_PIN_CTL_DATA_0:
+- case WCD938X_DIGITAL_PIN_CTL_DATA_1:
+- case WCD938X_DIGITAL_DIG_DEBUG_CTL:
+- case WCD938X_DIGITAL_DIG_DEBUG_EN:
+- case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
+- case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
+- case WCD938X_DIGITAL_SSP_DBG:
+- case WCD938X_DIGITAL_SPARE_0:
+- case WCD938X_DIGITAL_SPARE_1:
+- case WCD938X_DIGITAL_SPARE_2:
+- case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
+- case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
+- case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
+- case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
+- case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
+- case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
+- case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
+- case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
+- case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
+-{
+- switch (reg) {
+- case WCD938X_ANA_MBHC_RESULT_1:
+- case WCD938X_ANA_MBHC_RESULT_2:
+- case WCD938X_ANA_MBHC_RESULT_3:
+- case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
+- case WCD938X_TX_1_2_SAR2_ERR:
+- case WCD938X_TX_1_2_SAR1_ERR:
+- case WCD938X_TX_3_4_SAR4_ERR:
+- case WCD938X_TX_3_4_SAR3_ERR:
+- case WCD938X_HPH_L_STATUS:
+- case WCD938X_HPH_R_STATUS:
+- case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
+- case WCD938X_EAR_STATUS_REG_1:
+- case WCD938X_EAR_STATUS_REG_2:
+- case WCD938X_MBHC_NEW_FSM_STATUS:
+- case WCD938X_MBHC_NEW_ADC_RESULT:
+- case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
+- case WCD938X_AUX_INT_STATUS_REG:
+- case WCD938X_LDORXTX_INT_STATUS:
+- case WCD938X_DIGITAL_CHIP_ID0:
+- case WCD938X_DIGITAL_CHIP_ID1:
+- case WCD938X_DIGITAL_CHIP_ID2:
+- case WCD938X_DIGITAL_CHIP_ID3:
+- case WCD938X_DIGITAL_INTR_STATUS_0:
+- case WCD938X_DIGITAL_INTR_STATUS_1:
+- case WCD938X_DIGITAL_INTR_STATUS_2:
+- case WCD938X_DIGITAL_INTR_CLEAR_0:
+- case WCD938X_DIGITAL_INTR_CLEAR_1:
+- case WCD938X_DIGITAL_INTR_CLEAR_2:
+- case WCD938X_DIGITAL_SWR_HM_TEST_0:
+- case WCD938X_DIGITAL_SWR_HM_TEST_1:
+- case WCD938X_DIGITAL_EFUSE_T_DATA_0:
+- case WCD938X_DIGITAL_EFUSE_T_DATA_1:
+- case WCD938X_DIGITAL_PIN_STATUS_0:
+- case WCD938X_DIGITAL_PIN_STATUS_1:
+- case WCD938X_DIGITAL_MODE_STATUS_0:
+- case WCD938X_DIGITAL_MODE_STATUS_1:
+- case WCD938X_DIGITAL_EFUSE_REG_0:
+- case WCD938X_DIGITAL_EFUSE_REG_1:
+- case WCD938X_DIGITAL_EFUSE_REG_2:
+- case WCD938X_DIGITAL_EFUSE_REG_3:
+- case WCD938X_DIGITAL_EFUSE_REG_4:
+- case WCD938X_DIGITAL_EFUSE_REG_5:
+- case WCD938X_DIGITAL_EFUSE_REG_6:
+- case WCD938X_DIGITAL_EFUSE_REG_7:
+- case WCD938X_DIGITAL_EFUSE_REG_8:
+- case WCD938X_DIGITAL_EFUSE_REG_9:
+- case WCD938X_DIGITAL_EFUSE_REG_10:
+- case WCD938X_DIGITAL_EFUSE_REG_11:
+- case WCD938X_DIGITAL_EFUSE_REG_12:
+- case WCD938X_DIGITAL_EFUSE_REG_13:
+- case WCD938X_DIGITAL_EFUSE_REG_14:
+- case WCD938X_DIGITAL_EFUSE_REG_15:
+- case WCD938X_DIGITAL_EFUSE_REG_16:
+- case WCD938X_DIGITAL_EFUSE_REG_17:
+- case WCD938X_DIGITAL_EFUSE_REG_18:
+- case WCD938X_DIGITAL_EFUSE_REG_19:
+- case WCD938X_DIGITAL_EFUSE_REG_20:
+- case WCD938X_DIGITAL_EFUSE_REG_21:
+- case WCD938X_DIGITAL_EFUSE_REG_22:
+- case WCD938X_DIGITAL_EFUSE_REG_23:
+- case WCD938X_DIGITAL_EFUSE_REG_24:
+- case WCD938X_DIGITAL_EFUSE_REG_25:
+- case WCD938X_DIGITAL_EFUSE_REG_26:
+- case WCD938X_DIGITAL_EFUSE_REG_27:
+- case WCD938X_DIGITAL_EFUSE_REG_28:
+- case WCD938X_DIGITAL_EFUSE_REG_29:
+- case WCD938X_DIGITAL_EFUSE_REG_30:
+- case WCD938X_DIGITAL_EFUSE_REG_31:
+- return true;
+- }
+- return false;
+-}
+-
+-static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
+-{
+- bool ret;
+-
+- ret = wcd938x_readonly_register(dev, reg);
+- if (!ret)
+- return wcd938x_rdwr_register(dev, reg);
+-
+- return ret;
+-}
+-
+-static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
+-{
+- return wcd938x_rdwr_register(dev, reg);
+-}
+-
+-static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
+-{
+- if (reg <= WCD938X_BASE_ADDRESS)
+- return false;
+-
+- if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
+- return true;
+-
+- if (wcd938x_readonly_register(dev, reg))
+- return true;
+-
+- return false;
+-}
+-
+-static struct regmap_config wcd938x_regmap_config = {
+- .name = "wcd938x_csr",
+- .reg_bits = 32,
+- .val_bits = 8,
+- .cache_type = REGCACHE_RBTREE,
+- .reg_defaults = wcd938x_defaults,
+- .num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
+- .max_register = WCD938X_MAX_REGISTER,
+- .readable_reg = wcd938x_readable_register,
+- .writeable_reg = wcd938x_writeable_register,
+- .volatile_reg = wcd938x_volatile_register,
+- .can_multi_write = true,
+-};
+-
+ static const struct regmap_irq wcd938x_irqs[WCD938X_NUM_IRQS] = {
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_PRESS_DET, 0, 0x01),
+ REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_RELEASE_DET, 0, 0x02),
+@@ -4412,10 +3417,10 @@ static int wcd938x_bind(struct device *dev)
+ return -EINVAL;
+ }
+
+- wcd938x->regmap = devm_regmap_init_sdw(wcd938x->tx_sdw_dev, &wcd938x_regmap_config);
+- if (IS_ERR(wcd938x->regmap)) {
+- dev_err(dev, "%s: tx csr regmap not found\n", __func__);
+- return PTR_ERR(wcd938x->regmap);
++ wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
++ if (!wcd938x->regmap) {
++ dev_err(dev, "could not get TX device regmap\n");
++ return -EINVAL;
+ }
+
+ ret = wcd938x_irq_init(wcd938x, dev);
+diff --git a/sound/soc/codecs/wcd938x.h b/sound/soc/codecs/wcd938x.h
+index ea82039e78435..74b1498fec38b 100644
+--- a/sound/soc/codecs/wcd938x.h
++++ b/sound/soc/codecs/wcd938x.h
+@@ -663,6 +663,7 @@ struct wcd938x_sdw_priv {
+ bool is_tx;
+ struct wcd938x_priv *wcd938x;
+ struct irq_domain *slave_irq;
++ struct regmap *regmap;
+ };
+
+ #if IS_ENABLED(CONFIG_SND_SOC_WCD938X_SDW)
+--
+2.39.2
+
--- /dev/null
+From 1e40b8026c11eb25d99ef837676f2a3c437c67f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Nov 2022 17:27:27 +0800
+Subject: ASoC: rt1318: Add RT1318 SDCA vendor-specific driver
+
+From: Shuming Fan <shumingf@realtek.com>
+
+[ Upstream commit 6ad73a2b42ea6d43fc5bf32033e8f6b21df3109e ]
+
+This is the initial amplifier driver for the RT1318 SDCA version.
+
+Signed-off-by: Shuming Fan <shumingf@realtek.com>
+Link: https://lore.kernel.org/r/20221108092727.13011-1-shumingf@realtek.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 84822215acd1 ("ASoC: codecs: wcd938x: fix accessing regmap on unattached devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/Kconfig | 6 +
+ sound/soc/codecs/Makefile | 2 +
+ sound/soc/codecs/rt1318-sdw.c | 884 ++++++++++++++++++++++++++++++++++
+ sound/soc/codecs/rt1318-sdw.h | 101 ++++
+ 4 files changed, 993 insertions(+)
+ create mode 100644 sound/soc/codecs/rt1318-sdw.c
+ create mode 100644 sound/soc/codecs/rt1318-sdw.h
+
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 3f16ad1c37585..965ae55fa1607 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -199,6 +199,7 @@ config SND_SOC_ALL_CODECS
+ imply SND_SOC_RT715_SDCA_SDW
+ imply SND_SOC_RT1308_SDW
+ imply SND_SOC_RT1316_SDW
++ imply SND_SOC_RT1318_SDW
+ imply SND_SOC_RT9120
+ imply SND_SOC_SDW_MOCKUP
+ imply SND_SOC_SGTL5000
+@@ -1311,6 +1312,11 @@ config SND_SOC_RT1316_SDW
+ depends on SOUNDWIRE
+ select REGMAP_SOUNDWIRE
+
++config SND_SOC_RT1318_SDW
++ tristate "Realtek RT1318 Codec - SDW"
++ depends on SOUNDWIRE
++ select REGMAP_SOUNDWIRE
++
+ config SND_SOC_RT5514
+ tristate
+ depends on I2C
+diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
+index 9170ee1447dda..71d3ce5867e4f 100644
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -196,6 +196,7 @@ snd-soc-rt1305-objs := rt1305.o
+ snd-soc-rt1308-objs := rt1308.o
+ snd-soc-rt1308-sdw-objs := rt1308-sdw.o
+ snd-soc-rt1316-sdw-objs := rt1316-sdw.o
++snd-soc-rt1318-sdw-objs := rt1318-sdw.o
+ snd-soc-rt274-objs := rt274.o
+ snd-soc-rt286-objs := rt286.o
+ snd-soc-rt298-objs := rt298.o
+@@ -551,6 +552,7 @@ obj-$(CONFIG_SND_SOC_RT1305) += snd-soc-rt1305.o
+ obj-$(CONFIG_SND_SOC_RT1308) += snd-soc-rt1308.o
+ obj-$(CONFIG_SND_SOC_RT1308_SDW) += snd-soc-rt1308-sdw.o
+ obj-$(CONFIG_SND_SOC_RT1316_SDW) += snd-soc-rt1316-sdw.o
++obj-$(CONFIG_SND_SOC_RT1318_SDW) += snd-soc-rt1318-sdw.o
+ obj-$(CONFIG_SND_SOC_RT274) += snd-soc-rt274.o
+ obj-$(CONFIG_SND_SOC_RT286) += snd-soc-rt286.o
+ obj-$(CONFIG_SND_SOC_RT298) += snd-soc-rt298.o
+diff --git a/sound/soc/codecs/rt1318-sdw.c b/sound/soc/codecs/rt1318-sdw.c
+new file mode 100644
+index 0000000000000..f85f5ab2c6d04
+--- /dev/null
++++ b/sound/soc/codecs/rt1318-sdw.c
+@@ -0,0 +1,884 @@
++// SPDX-License-Identifier: GPL-2.0-only
++//
++// rt1318-sdw.c -- rt1318 SDCA ALSA SoC amplifier audio driver
++//
++// Copyright(c) 2022 Realtek Semiconductor Corp.
++//
++//
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/pm_runtime.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/regmap.h>
++#include <linux/dmi.h>
++#include <linux/firmware.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc-dapm.h>
++#include <sound/initval.h>
++#include "rt1318-sdw.h"
++
++static const struct reg_sequence rt1318_blind_write[] = {
++ { 0xc001, 0x43 },
++ { 0xc003, 0xa2 },
++ { 0xc004, 0x44 },
++ { 0xc005, 0x44 },
++ { 0xc006, 0x33 },
++ { 0xc007, 0x64 },
++ { 0xc320, 0x20 },
++ { 0xf203, 0x18 },
++ { 0xf211, 0x00 },
++ { 0xf212, 0x26 },
++ { 0xf20d, 0x17 },
++ { 0xf214, 0x06 },
++ { 0xf20e, 0x00 },
++ { 0xf223, 0x7f },
++ { 0xf224, 0xdb },
++ { 0xf225, 0xee },
++ { 0xf226, 0x3f },
++ { 0xf227, 0x0f },
++ { 0xf21a, 0x78 },
++ { 0xf242, 0x3c },
++ { 0xc321, 0x0b },
++ { 0xc200, 0xd8 },
++ { 0xc201, 0x27 },
++ { 0xc202, 0x0f },
++ { 0xf800, 0x20 },
++ { 0xdf00, 0x10 },
++ { 0xdf5f, 0x01 },
++ { 0xdf60, 0xa7 },
++ { 0xc400, 0x0e },
++ { 0xc401, 0x43 },
++ { 0xc402, 0xe0 },
++ { 0xc403, 0x00 },
++ { 0xc404, 0x4c },
++ { 0xc407, 0x02 },
++ { 0xc408, 0x3f },
++ { 0xc300, 0x01 },
++ { 0xc206, 0x78 },
++ { 0xc203, 0x84 },
++ { 0xc120, 0xc0 },
++ { 0xc121, 0x03 },
++ { 0xe000, 0x88 },
++ { 0xc321, 0x09 },
++ { 0xc322, 0x01 },
++ { 0xe706, 0x0f },
++ { 0xe707, 0x30 },
++ { 0xe806, 0x0f },
++ { 0xe807, 0x30 },
++ { 0xed00, 0xb0 },
++ { 0xce04, 0x02 },
++ { 0xce05, 0x63 },
++ { 0xce06, 0x68 },
++ { 0xce07, 0x07 },
++ { 0xcf04, 0x02 },
++ { 0xcf05, 0x63 },
++ { 0xcf06, 0x68 },
++ { 0xcf07, 0x07 },
++ { 0xce60, 0xe3 },
++ { 0xc130, 0x51 },
++ { 0xf102, 0x00 },
++ { 0xf103, 0x00 },
++ { 0xf104, 0xf5 },
++ { 0xf105, 0x06 },
++ { 0xf109, 0x9b },
++ { 0xf10a, 0x0b },
++ { 0xf10b, 0x4c },
++ { 0xf10b, 0x5c },
++ { 0xf102, 0x00 },
++ { 0xf103, 0x00 },
++ { 0xf104, 0xf5 },
++ { 0xf105, 0x0b },
++ { 0xf109, 0x03 },
++ { 0xf10a, 0x0b },
++ { 0xf10b, 0x4c },
++ { 0xf10b, 0x5c },
++ { 0xf102, 0x00 },
++ { 0xf103, 0x00 },
++ { 0xf104, 0xf5 },
++ { 0xf105, 0x0c },
++ { 0xf109, 0x7f },
++ { 0xf10a, 0x0b },
++ { 0xf10b, 0x4c },
++ { 0xf10b, 0x5c },
++
++ { 0xe604, 0x00 },
++ { 0xdb00, 0x0c },
++ { 0xdd00, 0x0c },
++ { 0xdc19, 0x00 },
++ { 0xdc1a, 0xff },
++ { 0xdc1b, 0xff },
++ { 0xdc1c, 0xff },
++ { 0xdc1d, 0x00 },
++ { 0xdc1e, 0x00 },
++ { 0xdc1f, 0x00 },
++ { 0xdc20, 0xff },
++ { 0xde19, 0x00 },
++ { 0xde1a, 0xff },
++ { 0xde1b, 0xff },
++ { 0xde1c, 0xff },
++ { 0xde1d, 0x00 },
++ { 0xde1e, 0x00 },
++ { 0xde1f, 0x00 },
++ { 0xde20, 0xff },
++ { 0xdb32, 0x00 },
++ { 0xdd32, 0x00 },
++ { 0xdb33, 0x0a },
++ { 0xdd33, 0x0a },
++ { 0xdb34, 0x1a },
++ { 0xdd34, 0x1a },
++ { 0xdb17, 0xef },
++ { 0xdd17, 0xef },
++ { 0xdba7, 0x00 },
++ { 0xdba8, 0x64 },
++ { 0xdda7, 0x00 },
++ { 0xdda8, 0x64 },
++ { 0xdb19, 0x40 },
++ { 0xdd19, 0x40 },
++ { 0xdb00, 0x4c },
++ { 0xdb01, 0x79 },
++ { 0xdd01, 0x79 },
++ { 0xdb04, 0x05 },
++ { 0xdb05, 0x03 },
++ { 0xdd04, 0x05 },
++ { 0xdd05, 0x03 },
++ { 0xdbbb, 0x09 },
++ { 0xdbbc, 0x30 },
++ { 0xdbbd, 0xf0 },
++ { 0xdbbe, 0xf1 },
++ { 0xddbb, 0x09 },
++ { 0xddbc, 0x30 },
++ { 0xddbd, 0xf0 },
++ { 0xddbe, 0xf1 },
++ { 0xdb01, 0x79 },
++ { 0xdd01, 0x79 },
++ { 0xdc52, 0xef },
++ { 0xde52, 0xef },
++ { 0x2f55, 0x22 },
++};
++
++static const struct reg_default rt1318_reg_defaults[] = {
++ { 0x3000, 0x00 },
++ { 0x3004, 0x01 },
++ { 0x3005, 0x23 },
++ { 0x3202, 0x00 },
++ { 0x3203, 0x01 },
++ { 0x3206, 0x00 },
++ { 0xc000, 0x00 },
++ { 0xc001, 0x43 },
++ { 0xc003, 0x22 },
++ { 0xc004, 0x44 },
++ { 0xc005, 0x44 },
++ { 0xc006, 0x33 },
++ { 0xc007, 0x64 },
++ { 0xc008, 0x05 },
++ { 0xc00a, 0xfc },
++ { 0xc00b, 0x0f },
++ { 0xc00c, 0x0e },
++ { 0xc00d, 0xef },
++ { 0xc00e, 0xe5 },
++ { 0xc00f, 0xff },
++ { 0xc120, 0xc0 },
++ { 0xc121, 0x00 },
++ { 0xc122, 0x00 },
++ { 0xc123, 0x14 },
++ { 0xc125, 0x00 },
++ { 0xc200, 0x00 },
++ { 0xc201, 0x00 },
++ { 0xc202, 0x00 },
++ { 0xc203, 0x04 },
++ { 0xc204, 0x00 },
++ { 0xc205, 0x00 },
++ { 0xc206, 0x68 },
++ { 0xc207, 0x70 },
++ { 0xc208, 0x00 },
++ { 0xc20a, 0x00 },
++ { 0xc20b, 0x01 },
++ { 0xc20c, 0x7f },
++ { 0xc20d, 0x01 },
++ { 0xc20e, 0x7f },
++ { 0xc300, 0x00 },
++ { 0xc301, 0x00 },
++ { 0xc303, 0x80 },
++ { 0xc320, 0x00 },
++ { 0xc321, 0x09 },
++ { 0xc322, 0x02 },
++ { 0xc410, 0x04 },
++ { 0xc430, 0x00 },
++ { 0xc431, 0x00 },
++ { 0xca00, 0x10 },
++ { 0xca01, 0x00 },
++ { 0xca02, 0x0b },
++ { 0xca10, 0x10 },
++ { 0xca11, 0x00 },
++ { 0xca12, 0x0b },
++ { 0xdd93, 0x00 },
++ { 0xdd94, 0x64 },
++ { 0xe300, 0xa0 },
++ { 0xed00, 0x80 },
++ { 0xed01, 0x0f },
++ { 0xed02, 0xff },
++ { 0xed03, 0x00 },
++ { 0xed04, 0x00 },
++ { 0xed05, 0x0f },
++ { 0xed06, 0xff },
++ { 0xf010, 0x10 },
++ { 0xf011, 0xec },
++ { 0xf012, 0x68 },
++ { 0xf013, 0x21 },
++ { 0xf800, 0x00 },
++ { 0xf801, 0x12 },
++ { 0xf802, 0xe0 },
++ { 0xf803, 0x2f },
++ { 0xf804, 0x00 },
++ { 0xf805, 0x00 },
++ { 0xf806, 0x07 },
++ { 0xf807, 0xff },
++ { SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0), 0x00 },
++ { SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L), 0x01 },
++ { SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R), 0x01 },
++ { SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23, RT1318_SDCA_CTL_REQ_POWER_STATE, 0), 0x03 },
++ { SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0), 0x09 },
++};
++
++static bool rt1318_readable_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case 0x2f55:
++ case 0x3000:
++ case 0x3004 ... 0x3005:
++ case 0x3202 ... 0x3203:
++ case 0x3206:
++ case 0xc000 ... 0xc00f:
++ case 0xc120 ... 0xc125:
++ case 0xc200 ... 0xc20e:
++ case 0xc300 ... 0xc303:
++ case 0xc320 ... 0xc322:
++ case 0xc410:
++ case 0xc430 ... 0xc431:
++ case 0xca00 ... 0xca02:
++ case 0xca10 ... 0xca12:
++ case 0xcb00 ... 0xcb0b:
++ case 0xcc00 ... 0xcce5:
++ case 0xcd00 ... 0xcde5:
++ case 0xce00 ... 0xce6a:
++ case 0xcf00 ... 0xcf53:
++ case 0xd000 ... 0xd0cc:
++ case 0xd100 ... 0xd1b9:
++ case 0xdb00 ... 0xdc53:
++ case 0xdd00 ... 0xde53:
++ case 0xdf00 ... 0xdf6b:
++ case 0xe300:
++ case 0xeb00 ... 0xebcc:
++ case 0xec00 ... 0xecb9:
++ case 0xed00 ... 0xed06:
++ case 0xf010 ... 0xf014:
++ case 0xf800 ... 0xf807:
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23, RT1318_SDCA_CTL_REQ_POWER_STATE, 0):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_MODE, 0):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS, 0):
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool rt1318_volatile_register(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case 0x2f55:
++ case 0x3000 ... 0x3001:
++ case 0xc000:
++ case 0xc301:
++ case 0xc410:
++ case 0xc430 ... 0xc431:
++ case 0xdb06:
++ case 0xdb12:
++ case 0xdb1d ... 0xdb1f:
++ case 0xdb35:
++ case 0xdb37:
++ case 0xdb8a ... 0xdb92:
++ case 0xdbc5 ... 0xdbc8:
++ case 0xdc2b ... 0xdc49:
++ case 0xdd0b:
++ case 0xdd12:
++ case 0xdd1d ... 0xdd1f:
++ case 0xdd35:
++ case 0xdd8a ... 0xdd92:
++ case 0xddc5 ... 0xddc8:
++ case 0xde2b ... 0xde44:
++ case 0xdf4a ... 0xdf55:
++ case 0xe224 ... 0xe23b:
++ case 0xea01:
++ case 0xebc5:
++ case 0xebc8:
++ case 0xebcb ... 0xebcc:
++ case 0xed03 ... 0xed06:
++ case 0xf010 ... 0xf014:
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_MODE, 0):
++ case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS, 0):
++ return true;
++ default:
++ return false;
++ }
++}
++
++static const struct regmap_config rt1318_sdw_regmap = {
++ .reg_bits = 32,
++ .val_bits = 8,
++ .readable_reg = rt1318_readable_register,
++ .volatile_reg = rt1318_volatile_register,
++ .max_register = 0x41081488,
++ .reg_defaults = rt1318_reg_defaults,
++ .num_reg_defaults = ARRAY_SIZE(rt1318_reg_defaults),
++ .cache_type = REGCACHE_RBTREE,
++ .use_single_read = true,
++ .use_single_write = true,
++};
++
++static int rt1318_read_prop(struct sdw_slave *slave)
++{
++ struct sdw_slave_prop *prop = &slave->prop;
++ int nval;
++ int i, j;
++ u32 bit;
++ unsigned long addr;
++ struct sdw_dpn_prop *dpn;
++
++ prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
++ prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
++ prop->is_sdca = true;
++
++ prop->paging_support = true;
++
++ /* first we need to allocate memory for set bits in port lists */
++ prop->source_ports = BIT(2);
++ prop->sink_ports = BIT(1);
++
++ nval = hweight32(prop->source_ports);
++ prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
++ sizeof(*prop->src_dpn_prop), GFP_KERNEL);
++ if (!prop->src_dpn_prop)
++ return -ENOMEM;
++
++ i = 0;
++ dpn = prop->src_dpn_prop;
++ addr = prop->source_ports;
++ for_each_set_bit(bit, &addr, 32) {
++ dpn[i].num = bit;
++ dpn[i].type = SDW_DPN_FULL;
++ dpn[i].simple_ch_prep_sm = true;
++ dpn[i].ch_prep_timeout = 10;
++ i++;
++ }
++
++ /* do this again for sink now */
++ nval = hweight32(prop->sink_ports);
++ prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
++ sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
++ if (!prop->sink_dpn_prop)
++ return -ENOMEM;
++
++ j = 0;
++ dpn = prop->sink_dpn_prop;
++ addr = prop->sink_ports;
++ for_each_set_bit(bit, &addr, 32) {
++ dpn[j].num = bit;
++ dpn[j].type = SDW_DPN_FULL;
++ dpn[j].simple_ch_prep_sm = true;
++ dpn[j].ch_prep_timeout = 10;
++ j++;
++ }
++
++ /* set the timeout values */
++ prop->clk_stop_timeout = 20;
++
++ return 0;
++}
++
++static int rt1318_io_init(struct device *dev, struct sdw_slave *slave)
++{
++ struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++
++ if (rt1318->hw_init)
++ return 0;
++
++ if (rt1318->first_hw_init) {
++ regcache_cache_only(rt1318->regmap, false);
++ regcache_cache_bypass(rt1318->regmap, true);
++ } else {
++ /*
++ * PM runtime is only enabled when a Slave reports as Attached
++ */
++
++ /* set autosuspend parameters */
++ pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
++ pm_runtime_use_autosuspend(&slave->dev);
++
++ /* update count of parent 'active' children */
++ pm_runtime_set_active(&slave->dev);
++
++ /* make sure the device does not suspend immediately */
++ pm_runtime_mark_last_busy(&slave->dev);
++
++ pm_runtime_enable(&slave->dev);
++ }
++
++ pm_runtime_get_noresume(&slave->dev);
++
++ /* blind write */
++ regmap_multi_reg_write(rt1318->regmap, rt1318_blind_write,
++ ARRAY_SIZE(rt1318_blind_write));
++
++ if (rt1318->first_hw_init) {
++ regcache_cache_bypass(rt1318->regmap, false);
++ regcache_mark_dirty(rt1318->regmap);
++ }
++
++ /* Mark Slave initialization complete */
++ rt1318->first_hw_init = true;
++ rt1318->hw_init = true;
++
++ pm_runtime_mark_last_busy(&slave->dev);
++ pm_runtime_put_autosuspend(&slave->dev);
++
++ dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
++ return 0;
++}
++
++static int rt1318_update_status(struct sdw_slave *slave,
++ enum sdw_slave_status status)
++{
++ struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(&slave->dev);
++
++ /* Update the status */
++ rt1318->status = status;
++
++ if (status == SDW_SLAVE_UNATTACHED)
++ rt1318->hw_init = false;
++
++ /*
++ * Perform initialization only if slave status is present and
++ * hw_init flag is false
++ */
++ if (rt1318->hw_init || rt1318->status != SDW_SLAVE_ATTACHED)
++ return 0;
++
++ /* perform I/O transfers required for Slave initialization */
++ return rt1318_io_init(&slave->dev, slave);
++}
++
++static int rt1318_classd_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_component *component =
++ snd_soc_dapm_to_component(w->dapm);
++ struct rt1318_sdw_priv *rt1318 = snd_soc_component_get_drvdata(component);
++ unsigned char ps0 = 0x0, ps3 = 0x3;
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMU:
++ regmap_write(rt1318->regmap,
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23,
++ RT1318_SDCA_CTL_REQ_POWER_STATE, 0),
++ ps0);
++ break;
++ case SND_SOC_DAPM_PRE_PMD:
++ regmap_write(rt1318->regmap,
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23,
++ RT1318_SDCA_CTL_REQ_POWER_STATE, 0),
++ ps3);
++ break;
++
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static const char * const rt1318_rx_data_ch_select[] = {
++ "L,R",
++ "L,L",
++ "L,R",
++ "L,L+R",
++ "R,L",
++ "R,R",
++ "R,L+R",
++ "L+R,L",
++ "L+R,R",
++ "L+R,L+R",
++};
++
++static SOC_ENUM_SINGLE_DECL(rt1318_rx_data_ch_enum,
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0), 0,
++ rt1318_rx_data_ch_select);
++
++static const struct snd_kcontrol_new rt1318_snd_controls[] = {
++
++ /* UDMPU Cluster Selection */
++ SOC_ENUM("RX Channel Select", rt1318_rx_data_ch_enum),
++};
++
++static const struct snd_kcontrol_new rt1318_sto_dac =
++ SOC_DAPM_DOUBLE_R("Switch",
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L),
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R),
++ 0, 1, 1);
++
++static const struct snd_soc_dapm_widget rt1318_dapm_widgets[] = {
++ /* Audio Interface */
++ SND_SOC_DAPM_AIF_IN("DP1RX", "DP1 Playback", 0, SND_SOC_NOPM, 0, 0),
++ SND_SOC_DAPM_AIF_OUT("DP2TX", "DP2 Capture", 0, SND_SOC_NOPM, 0, 0),
++
++ /* Digital Interface */
++ SND_SOC_DAPM_SWITCH("DAC", SND_SOC_NOPM, 0, 0, &rt1318_sto_dac),
++
++ /* Output */
++ SND_SOC_DAPM_PGA_E("CLASS D", SND_SOC_NOPM, 0, 0, NULL, 0,
++ rt1318_classd_event, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
++ SND_SOC_DAPM_OUTPUT("SPOL"),
++ SND_SOC_DAPM_OUTPUT("SPOR"),
++ /* Input */
++ SND_SOC_DAPM_PGA("FB Data", SND_SOC_NOPM, 0, 0, NULL, 0),
++ SND_SOC_DAPM_SIGGEN("FB Gen"),
++};
++
++static const struct snd_soc_dapm_route rt1318_dapm_routes[] = {
++ { "DAC", "Switch", "DP1RX" },
++ { "CLASS D", NULL, "DAC" },
++ { "SPOL", NULL, "CLASS D" },
++ { "SPOR", NULL, "CLASS D" },
++
++ { "FB Data", NULL, "FB Gen" },
++ { "DP2TX", NULL, "FB Data" },
++};
++
++static int rt1318_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
++ int direction)
++{
++ struct sdw_stream_data *stream;
++
++ if (!sdw_stream)
++ return 0;
++
++ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
++ if (!stream)
++ return -ENOMEM;
++
++ stream->sdw_stream = sdw_stream;
++
++ /* Use tx_mask or rx_mask to configure stream tag and set dma_data */
++ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
++ dai->playback_dma_data = stream;
++ else
++ dai->capture_dma_data = stream;
++
++ return 0;
++}
++
++static void rt1318_sdw_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct sdw_stream_data *stream;
++
++ stream = snd_soc_dai_get_dma_data(dai, substream);
++ snd_soc_dai_set_dma_data(dai, substream, NULL);
++ kfree(stream);
++}
++
++static int rt1318_sdw_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
++{
++ struct snd_soc_component *component = dai->component;
++ struct rt1318_sdw_priv *rt1318 =
++ snd_soc_component_get_drvdata(component);
++ struct sdw_stream_config stream_config;
++ struct sdw_port_config port_config;
++ enum sdw_data_direction direction;
++ struct sdw_stream_data *stream;
++ int retval, port, num_channels, ch_mask;
++ unsigned int sampling_rate;
++
++ dev_dbg(dai->dev, "%s %s", __func__, dai->name);
++ stream = snd_soc_dai_get_dma_data(dai, substream);
++
++ if (!stream)
++ return -EINVAL;
++
++ if (!rt1318->sdw_slave)
++ return -EINVAL;
++
++ /* SoundWire specific configuration */
++ /* port 1 for playback */
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ direction = SDW_DATA_DIR_RX;
++ port = 1;
++ } else {
++ direction = SDW_DATA_DIR_TX;
++ port = 2;
++ }
++
++ num_channels = params_channels(params);
++ ch_mask = (1 << num_channels) - 1;
++
++ stream_config.frame_rate = params_rate(params);
++ stream_config.ch_count = num_channels;
++ stream_config.bps = snd_pcm_format_width(params_format(params));
++ stream_config.direction = direction;
++
++ port_config.ch_mask = ch_mask;
++ port_config.num = port;
++
++ retval = sdw_stream_add_slave(rt1318->sdw_slave, &stream_config,
++ &port_config, 1, stream->sdw_stream);
++ if (retval) {
++ dev_err(dai->dev, "Unable to configure port\n");
++ return retval;
++ }
++
++ /* sampling rate configuration */
++ switch (params_rate(params)) {
++ case 16000:
++ sampling_rate = RT1318_SDCA_RATE_16000HZ;
++ break;
++ case 32000:
++ sampling_rate = RT1318_SDCA_RATE_32000HZ;
++ break;
++ case 44100:
++ sampling_rate = RT1318_SDCA_RATE_44100HZ;
++ break;
++ case 48000:
++ sampling_rate = RT1318_SDCA_RATE_48000HZ;
++ break;
++ case 96000:
++ sampling_rate = RT1318_SDCA_RATE_96000HZ;
++ break;
++ case 192000:
++ sampling_rate = RT1318_SDCA_RATE_192000HZ;
++ break;
++ default:
++ dev_err(component->dev, "Rate %d is not supported\n",
++ params_rate(params));
++ return -EINVAL;
++ }
++
++ /* set sampling frequency */
++ regmap_write(rt1318->regmap,
++ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0),
++ sampling_rate);
++
++ return 0;
++}
++
++static int rt1318_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_component *component = dai->component;
++ struct rt1318_sdw_priv *rt1318 =
++ snd_soc_component_get_drvdata(component);
++ struct sdw_stream_data *stream =
++ snd_soc_dai_get_dma_data(dai, substream);
++
++ if (!rt1318->sdw_slave)
++ return -EINVAL;
++
++ sdw_stream_remove_slave(rt1318->sdw_slave, stream->sdw_stream);
++ return 0;
++}
++
++/*
++ * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
++ * port_prep are not defined for now
++ */
++static struct sdw_slave_ops rt1318_slave_ops = {
++ .read_prop = rt1318_read_prop,
++ .update_status = rt1318_update_status,
++};
++
++static int rt1318_sdw_component_probe(struct snd_soc_component *component)
++{
++ int ret;
++ struct rt1318_sdw_priv *rt1318 = snd_soc_component_get_drvdata(component);
++
++ rt1318->component = component;
++
++ ret = pm_runtime_resume(component->dev);
++ dev_dbg(&rt1318->sdw_slave->dev, "%s pm_runtime_resume, ret=%d", __func__, ret);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++
++ return 0;
++}
++
++static const struct snd_soc_component_driver soc_component_sdw_rt1318 = {
++ .probe = rt1318_sdw_component_probe,
++ .controls = rt1318_snd_controls,
++ .num_controls = ARRAY_SIZE(rt1318_snd_controls),
++ .dapm_widgets = rt1318_dapm_widgets,
++ .num_dapm_widgets = ARRAY_SIZE(rt1318_dapm_widgets),
++ .dapm_routes = rt1318_dapm_routes,
++ .num_dapm_routes = ARRAY_SIZE(rt1318_dapm_routes),
++ .endianness = 1,
++};
++
++static const struct snd_soc_dai_ops rt1318_aif_dai_ops = {
++ .hw_params = rt1318_sdw_hw_params,
++ .hw_free = rt1318_sdw_pcm_hw_free,
++ .set_stream = rt1318_set_sdw_stream,
++ .shutdown = rt1318_sdw_shutdown,
++};
++
++#define RT1318_STEREO_RATES (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
++ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
++#define RT1318_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
++ SNDRV_PCM_FMTBIT_S32_LE)
++
++static struct snd_soc_dai_driver rt1318_sdw_dai[] = {
++ {
++ .name = "rt1318-aif",
++ .playback = {
++ .stream_name = "DP1 Playback",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = RT1318_STEREO_RATES,
++ .formats = RT1318_FORMATS,
++ },
++ .capture = {
++ .stream_name = "DP2 Capture",
++ .channels_min = 1,
++ .channels_max = 2,
++ .rates = RT1318_STEREO_RATES,
++ .formats = RT1318_FORMATS,
++ },
++ .ops = &rt1318_aif_dai_ops,
++ },
++};
++
++static int rt1318_sdw_init(struct device *dev, struct regmap *regmap,
++ struct sdw_slave *slave)
++{
++ struct rt1318_sdw_priv *rt1318;
++ int ret;
++
++ rt1318 = devm_kzalloc(dev, sizeof(*rt1318), GFP_KERNEL);
++ if (!rt1318)
++ return -ENOMEM;
++
++ dev_set_drvdata(dev, rt1318);
++ rt1318->sdw_slave = slave;
++ rt1318->regmap = regmap;
++
++ /*
++ * Mark hw_init to false
++ * HW init will be performed when device reports present
++ */
++ rt1318->hw_init = false;
++ rt1318->first_hw_init = false;
++
++ ret = devm_snd_soc_register_component(dev,
++ &soc_component_sdw_rt1318,
++ rt1318_sdw_dai,
++ ARRAY_SIZE(rt1318_sdw_dai));
++
++ dev_dbg(&slave->dev, "%s\n", __func__);
++
++ return ret;
++}
++
++static int rt1318_sdw_probe(struct sdw_slave *slave,
++ const struct sdw_device_id *id)
++{
++ struct regmap *regmap;
++
++ /* Regmap Initialization */
++ regmap = devm_regmap_init_sdw(slave, &rt1318_sdw_regmap);
++ if (IS_ERR(regmap))
++ return PTR_ERR(regmap);
++
++ return rt1318_sdw_init(&slave->dev, regmap, slave);
++}
++
++static int rt1318_sdw_remove(struct sdw_slave *slave)
++{
++ struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(&slave->dev);
++
++ if (rt1318->first_hw_init)
++ pm_runtime_disable(&slave->dev);
++
++ return 0;
++}
++
++static const struct sdw_device_id rt1318_id[] = {
++ SDW_SLAVE_ENTRY_EXT(0x025d, 0x1318, 0x3, 0x1, 0),
++ {},
++};
++MODULE_DEVICE_TABLE(sdw, rt1318_id);
++
++static int __maybe_unused rt1318_dev_suspend(struct device *dev)
++{
++ struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++
++ if (!rt1318->hw_init)
++ return 0;
++
++ regcache_cache_only(rt1318->regmap, true);
++ return 0;
++}
++
++#define RT1318_PROBE_TIMEOUT 5000
++
++static int __maybe_unused rt1318_dev_resume(struct device *dev)
++{
++ struct sdw_slave *slave = dev_to_sdw_dev(dev);
++ struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++ unsigned long time;
++
++ if (!rt1318->first_hw_init)
++ return 0;
++
++ if (!slave->unattach_request)
++ goto regmap_sync;
++
++ time = wait_for_completion_timeout(&slave->initialization_complete,
++ msecs_to_jiffies(RT1318_PROBE_TIMEOUT));
++ if (!time) {
++ dev_err(&slave->dev, "Initialization not complete, timed out\n");
++ return -ETIMEDOUT;
++ }
++
++regmap_sync:
++ slave->unattach_request = 0;
++ regcache_cache_only(rt1318->regmap, false);
++ regcache_sync(rt1318->regmap);
++
++ return 0;
++}
++
++static const struct dev_pm_ops rt1318_pm = {
++ SET_SYSTEM_SLEEP_PM_OPS(rt1318_dev_suspend, rt1318_dev_resume)
++ SET_RUNTIME_PM_OPS(rt1318_dev_suspend, rt1318_dev_resume, NULL)
++};
++
++static struct sdw_driver rt1318_sdw_driver = {
++ .driver = {
++ .name = "rt1318-sdca",
++ .owner = THIS_MODULE,
++ .pm = &rt1318_pm,
++ },
++ .probe = rt1318_sdw_probe,
++ .remove = rt1318_sdw_remove,
++ .ops = &rt1318_slave_ops,
++ .id_table = rt1318_id,
++};
++module_sdw_driver(rt1318_sdw_driver);
++
++MODULE_DESCRIPTION("ASoC RT1318 driver SDCA SDW");
++MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/codecs/rt1318-sdw.h b/sound/soc/codecs/rt1318-sdw.h
+new file mode 100644
+index 0000000000000..4d7ac9c4bd8de
+--- /dev/null
++++ b/sound/soc/codecs/rt1318-sdw.h
+@@ -0,0 +1,101 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * rt1318-sdw.h -- RT1318 SDCA ALSA SoC audio driver header
++ *
++ * Copyright(c) 2022 Realtek Semiconductor Corp.
++ */
++
++#ifndef __RT1318_SDW_H__
++#define __RT1318_SDW_H__
++
++#include <linux/regmap.h>
++#include <linux/soundwire/sdw.h>
++#include <linux/soundwire/sdw_type.h>
++#include <linux/soundwire/sdw_registers.h>
++#include <sound/soc.h>
++
++/* imp-defined registers */
++#define RT1318_SAPU_SM 0x3203
++
++#define R1318_TCON 0xc203
++#define R1318_TCON_RELATED_1 0xc206
++
++#define R1318_SPK_TEMPERATRUE_PROTECTION_0 0xdb00
++#define R1318_SPK_TEMPERATRUE_PROTECTION_L_4 0xdb08
++#define R1318_SPK_TEMPERATRUE_PROTECTION_R_4 0xdd08
++
++#define R1318_SPK_TEMPERATRUE_PROTECTION_L_6 0xdb12
++#define R1318_SPK_TEMPERATRUE_PROTECTION_R_6 0xdd12
++
++#define RT1318_INIT_RECIPROCAL_REG_L_24 0xdbb5
++#define RT1318_INIT_RECIPROCAL_REG_L_23_16 0xdbb6
++#define RT1318_INIT_RECIPROCAL_REG_L_15_8 0xdbb7
++#define RT1318_INIT_RECIPROCAL_REG_L_7_0 0xdbb8
++#define RT1318_INIT_RECIPROCAL_REG_R_24 0xddb5
++#define RT1318_INIT_RECIPROCAL_REG_R_23_16 0xddb6
++#define RT1318_INIT_RECIPROCAL_REG_R_15_8 0xddb7
++#define RT1318_INIT_RECIPROCAL_REG_R_7_0 0xddb8
++
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_24 0xdbc5
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_23_16 0xdbc6
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_15_8 0xdbc7
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_7_0 0xdbc8
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_24 0xddc5
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_23_16 0xddc6
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_15_8 0xddc7
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_7_0 0xddc8
++
++#define RT1318_R0_COMPARE_FLAG_L 0xdb35
++#define RT1318_R0_COMPARE_FLAG_R 0xdd35
++
++#define RT1318_STP_INITIAL_RS_TEMP_H 0xdd93
++#define RT1318_STP_INITIAL_RS_TEMP_L 0xdd94
++
++/* RT1318 SDCA Control - function number */
++#define FUNC_NUM_SMART_AMP 0x04
++
++/* RT1318 SDCA entity */
++#define RT1318_SDCA_ENT_PDE23 0x31
++#define RT1318_SDCA_ENT_XU24 0x24
++#define RT1318_SDCA_ENT_FU21 0x03
++#define RT1318_SDCA_ENT_UDMPU21 0x02
++#define RT1318_SDCA_ENT_CS21 0x21
++#define RT1318_SDCA_ENT_SAPU 0x29
++
++/* RT1318 SDCA control */
++#define RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX 0x10
++#define RT1318_SDCA_CTL_REQ_POWER_STATE 0x01
++#define RT1318_SDCA_CTL_FU_MUTE 0x01
++#define RT1318_SDCA_CTL_FU_VOLUME 0x02
++#define RT1318_SDCA_CTL_UDMPU_CLUSTER 0x10
++#define RT1318_SDCA_CTL_SAPU_PROTECTION_MODE 0x10
++#define RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS 0x11
++
++/* RT1318 SDCA channel */
++#define CH_L 0x01
++#define CH_R 0x02
++
++/* sample frequency index */
++#define RT1318_SDCA_RATE_16000HZ 0x04
++#define RT1318_SDCA_RATE_32000HZ 0x07
++#define RT1318_SDCA_RATE_44100HZ 0x08
++#define RT1318_SDCA_RATE_48000HZ 0x09
++#define RT1318_SDCA_RATE_96000HZ 0x0b
++#define RT1318_SDCA_RATE_192000HZ 0x0d
++
++
++struct rt1318_sdw_priv {
++ struct snd_soc_component *component;
++ struct regmap *regmap;
++ struct sdw_slave *sdw_slave;
++ enum sdw_slave_status status;
++ struct sdw_bus_params params;
++ bool hw_init;
++ bool first_hw_init;
++};
++
++struct sdw_stream_data {
++ struct sdw_stream_runtime *sdw_stream;
++};
++
++#endif /* __RT1318_SDW_H__ */
+--
+2.39.2
+
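For reference, the SDCA sample-frequency indices defined in rt1318-sdw.h above correspond one-to-one to the PCM rates the DAI can expose. A minimal, purely illustrative helper that performs that mapping might look like the following; rt1318_rate_to_sdca_index() is a hypothetical name and is not part of the driver.

/* Hypothetical helper (not in the driver): map a PCM rate in Hz to the
 * SDCA sample-frequency index defines from rt1318-sdw.h. */
static int rt1318_rate_to_sdca_index(unsigned int rate)
{
	switch (rate) {
	case 16000:
		return RT1318_SDCA_RATE_16000HZ;
	case 32000:
		return RT1318_SDCA_RATE_32000HZ;
	case 44100:
		return RT1318_SDCA_RATE_44100HZ;
	case 48000:
		return RT1318_SDCA_RATE_48000HZ;
	case 96000:
		return RT1318_SDCA_RATE_96000HZ;
	case 192000:
		return RT1318_SDCA_RATE_192000HZ;
	default:
		return -EINVAL;
	}
}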
--- /dev/null
+From 9395b56995173d02bcada195c0cfcacbae2aba6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Nov 2022 11:42:17 +0200
+Subject: drm: Add missing DP DSC extended capability definitions.
+
+From: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+
+[ Upstream commit 1482ec00be4a3634aeffbcc799791a723df69339 ]
+
+Adding DP DSC register definitions that we might need for further
+DSC implementation, supporting MST and DP branch pass-through mode.
+
+v2: - Fixed checkpatch comment warning
+v3: - Removed function which is not yet used (Jani Nikula)
+
+Reviewed-by: Vinod Govindapillai <vinod.govindapillai@intel.com>
+Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221101094222.22091-2-stanislav.lisovskiy@intel.com
+Stable-dep-of: 13525645e224 ("drm/dsc: fix drm_edp_dsc_sink_output_bpp() DPCD high byte usage")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/drm/display/drm_dp.h | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index e934aab357bea..9bc22a02874d9 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -240,6 +240,8 @@
+ #define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
+ # define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
+ # define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP (1 << 2)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP (1 << 3)
+
+ #define DP_DSC_REV 0x061
+ # define DP_DSC_MAJOR_MASK (0xf << 0)
+@@ -278,12 +280,15 @@
+
+ #define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
+ # define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
++# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
+
+ #define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
+
+ #define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
++# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK 0x06
++# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY 0x08
+
+ #define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
+ # define DP_DSC_RGB (1 << 0)
+@@ -345,11 +350,13 @@
+ # define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
+
+ #define DP_DSC_BITS_PER_PIXEL_INC 0x06F
++# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
++# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
+ # define DP_DSC_BITS_PER_PIXEL_1_16 0x0
+ # define DP_DSC_BITS_PER_PIXEL_1_8 0x1
+ # define DP_DSC_BITS_PER_PIXEL_1_4 0x2
+ # define DP_DSC_BITS_PER_PIXEL_1_2 0x3
+-# define DP_DSC_BITS_PER_PIXEL_1 0x4
++# define DP_DSC_BITS_PER_PIXEL_1_1 0x4
+
+ #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED 1
+--
+2.39.2
+
--- /dev/null
+From cf982d1dc25799ed875e10ea43ee3754665cac6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Nov 2022 11:30:38 -0500
+Subject: drm/amd/display: Add debug option to skip PSR CRTC disable
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 00812bfc7bcb02faf127ee05f6ac27a5581eb701 ]
+
+[Why]
+It's currently tied to Z10 support, and is required for Z10, but
+we can still support Z10 display off without PSR.
+
+We currently need to skip the PSR CRTC disable to prevent stuttering
+and underflow from occurring during PSR-SU.
+
+[How]
+Add a debug option to allow specifying this separately.
+
+Reviewed-by: Robin Chen <robin.chen@amd.com>
+Acked-by: Stylon Wang <stylon.wang@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 1 +
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index bf7fcd268cb47..6299130663a3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3381,7 +3381,7 @@ bool dc_link_setup_psr(struct dc_link *link,
+ case FAMILY_YELLOW_CARP:
+ case AMDGPU_FAMILY_GC_10_3_6:
+ case AMDGPU_FAMILY_GC_11_0_1:
+- if (dc->debug.disable_z10)
++ if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 6d64d3b0dc211..e038a180b941d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -829,6 +829,7 @@ struct dc_debug_options {
+ int crb_alloc_policy_min_disp_count;
+ bool disable_z10;
+ bool enable_z9_disable_interface;
++ bool psr_skip_crtc_disable;
+ union dpia_debug_options dpia_debug;
+ bool disable_fixed_vs_aux_timeout_wa;
+ bool force_disable_subvp;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 94a90c8f3abbe..58931df853f1e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,6 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
++ .psr_skip_crtc_disable = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+--
+2.39.2
+
--- /dev/null
+From 027edd2523d49987e36a918806230a7a06af9242 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Feb 2023 11:17:50 -0500
+Subject: drm/amd/display: Add minimum Z8 residency debug option
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 0db13eae41fcc67f408dbb3dfda59633c4fa03fb ]
+
+[Why]
+Allows finer control and tuning for debug and profiling.
+
+[How]
+Add the debug option into DC. The default remains the same as before
+for now.
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 +
+ drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 1 +
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 3 ++-
+ 3 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index e038a180b941d..3f277009075fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -781,6 +781,7 @@ struct dc_debug_options {
+ unsigned int force_odm_combine; //bit vector based on otg inst
+ unsigned int seamless_boot_odm_combine;
+ unsigned int force_odm_combine_4to1; //bit vector based on otg inst
++ int minimum_z8_residency_time;
+ bool disable_z9_mpc;
+ unsigned int force_fclk_khz;
+ bool enable_tri_buf;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 58931df853f1e..67c892b9e2cf5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,6 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
++ .minimum_z8_residency_time = 1000,
+ .psr_skip_crtc_disable = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 6a7bcba4a7dad..186538e3e3c0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -973,7 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ struct dc_stream_status *stream_status = &context->stream_status[0];
+- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
++ int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
++ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ bool is_pwrseq0 = link->link_index == 0;
+
+ if (dc_extended_blank_supported(dc)) {
+--
+2.39.2
+
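The new debug option is consumed by decide_zstate_support() as a simple threshold on the DML stutter period. A standalone sketch of that gate, using the 1000 us fallback visible in the hunk (the DCN314 default is later raised to 3080 us and finally lowered to 2000 us in the patches below):

/* Sketch of the Z8 residency gate added above: allow Z8 only when the
 * computed stutter period exceeds the configured minimum (1000 us fallback). */
static bool allow_z8_sketch(double stutter_period_us, int minimum_z8_residency_time_us)
{
	int threshold_us = minimum_z8_residency_time_us > 0 ?
			   minimum_z8_residency_time_us : 1000;

	return stutter_period_us > (double)threshold_us;
}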
--- /dev/null
+From e6c8f44071a45b4fc335c53c4f133702dee2b866 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Nov 2022 11:37:25 -0500
+Subject: drm/amd/display: Add Z8 allow states to z-state support list
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 80676936805e46c79c38008e5142a77a1b2f2dc7 ]
+
+[Why]
+Even if we block Z9 based on crossover threshold it's possible to
+allow for Z8.
+
+[How]
+There's support for this on DCN314, so update the support types to
+include a z8 only and z8_z10 only state.
+
+Update the decide_zstate_support function to allow for specifying
+these modes based on the Z8 threshold.
+
+DCN31 has z-state disabled, but still update the legacy code to
+map z8_only = disallow and z10_z8_only = z10_only to keep the support
+the same.
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Brian Chang <Brian.Chang@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 4 ++--
+ .../drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 12 ++++++++++--
+ drivers/gpu/drm/amd/display/dc/dc.h | 2 ++
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 12 +++++++++---
+ 4 files changed, 23 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+index 090b2c02aee17..0827c7df28557 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+@@ -333,8 +333,8 @@ void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
+ (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
+ support = DCN_ZSTATE_SUPPORT_DISALLOW;
+
+-
+- if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)
++ if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY ||
++ support == DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY)
+ param = 1;
+ else
+ param = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+index aa264c600408d..0765334f08259 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+@@ -349,8 +349,6 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
+ if (!clk_mgr->smu_present)
+ return;
+
+- // Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
+- // Arg[16] = Disallow Z9 -> new bit
+ switch (support) {
+
+ case DCN_ZSTATE_SUPPORT_ALLOW:
+@@ -369,6 +367,16 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
+ param = (1 << 10);
+ break;
+
++ case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
++ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
++ param = (1 << 10) | (1 << 8);
++ break;
++
++ case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
++ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
++ param = (1 << 8);
++ break;
++
+ default: //DCN_ZSTATE_SUPPORT_UNKNOWN
+ msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ param = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 8757d7ff8ff62..6d64d3b0dc211 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -491,6 +491,8 @@ enum dcn_pwr_state {
+ enum dcn_zstate_support_state {
+ DCN_ZSTATE_SUPPORT_UNKNOWN,
+ DCN_ZSTATE_SUPPORT_ALLOW,
++ DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY,
++ DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,
+ DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
+ DCN_ZSTATE_SUPPORT_DISALLOW,
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 602e885ed52c4..feef0a75878f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -949,6 +949,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ int plane_count;
+ int i;
+ unsigned int optimized_min_dst_y_next_start_us;
++ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+
+ plane_count = 0;
+ optimized_min_dst_y_next_start_us = 0;
+@@ -963,6 +964,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ * 2. single eDP, on link 0, 1 plane and stutter period > 5ms
+ * Z10 only cases:
+ * 1. single eDP, on link 0, 1 plane and stutter period >= 5ms
++ * Z8 cases:
++ * 1. stutter period sufficient
+ * Zstate not allowed cases:
+ * 1. Everything else
+ */
+@@ -990,11 +993,14 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+- return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
++ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else
+- return DCN_ZSTATE_SUPPORT_DISALLOW;
+- } else
++ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
++ } else if (allow_z8) {
++ return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
++ } else {
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
++ }
+ }
+
+ void dcn20_calculate_dlg_params(
+--
+2.39.2
+
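The DCN314 SMU cases encode the allowed states as individual bits of the VBIOSSMC_MSG_AllowZstatesEntry argument; from the hunk above, bit 10 gates Z10 and bit 8 gates Z8. A small illustrative helper (the function name is made up, and the bit meanings are inferred from the visible cases only):

/* Illustration only: build the AllowZstatesEntry argument from the bit
 * positions used in dcn314_smu_set_zstate_support() above
 * (Z10 only -> 1 << 10, Z8 only -> 1 << 8, Z8+Z10 -> both bits set). */
static unsigned int zstate_entry_param(bool allow_z8, bool allow_z10)
{
	unsigned int param = 0;

	if (allow_z8)
		param |= 1 << 8;
	if (allow_z10)
		param |= 1 << 10;

	return param;
}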
--- /dev/null
+From 6024cfc3867a8bf6f9dd81450856bb37cdc3a3ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Apr 2023 17:34:24 -0400
+Subject: drm/amd/display: Change default Z8 watermark values
+
+From: Leo Chen <sancchen@amd.com>
+
+[ Upstream commit 8f586cc16c1fc3c2202c9d54563db8c7ed365f82 ]
+
+[Why & How]
+Previous Z8 watermark values were causing flickering and OTC underflow.
+Updating Z8 watermark values based on the measurement.
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Acked-by: Alan Liu <HaoPing.Liu@amd.com>
+Signed-off-by: Leo Chen <sancchen@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 2c99193b63fa6..4f91e64754239 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -148,8 +148,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ .num_states = 5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
+- .sr_exit_z8_time_us = 210.0,
+- .sr_enter_plus_exit_z8_time_us = 310.0,
++ .sr_exit_z8_time_us = 268.0,
++ .sr_enter_plus_exit_z8_time_us = 393.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+--
+2.39.2
+
--- /dev/null
+From 9696f39dcad23e16a449541743583cb58ea3fea5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jan 2023 09:49:32 -0500
+Subject: drm/amd/display: Fix Z8 support configurations
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 73dd4ca4b5a01235607231839bd351bbef75a1d2 ]
+
+[Why]
+It's not supported in multi-display configurations, but it is supported
+on the 2nd eDP screen only.
+
+[How]
+Remove multi display support, restrict number of planes for all
+z-states support, but still allow Z8 if we're not using PWRSEQ0.
+
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index feef0a75878f9..6a7bcba4a7dad 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -949,7 +949,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ int plane_count;
+ int i;
+ unsigned int optimized_min_dst_y_next_start_us;
+- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+
+ plane_count = 0;
+ optimized_min_dst_y_next_start_us = 0;
+@@ -974,6 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ struct dc_stream_status *stream_status = &context->stream_status[0];
++ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
++ bool is_pwrseq0 = link->link_index == 0;
+
+ if (dc_extended_blank_supported(dc)) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -986,18 +987,17 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ }
+ }
+ }
+- /* zstate only supported on PWRSEQ0 and when there's <2 planes*/
+- if (link->link_index != 0 || stream_status->plane_count > 1)
++
++ /* Don't support multi-plane configurations */
++ if (stream_status->plane_count > 1)
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+
+- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
++ if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+- else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
++ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else
+ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+- } else if (allow_z8) {
+- return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ } else {
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+ }
+--
+2.39.2
+
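Taken together with the two preceding z-state patches, the single-eDP branch of decide_zstate_support() now reduces to the decision flow below. This is a simplified sketch of the visible hunks, assuming the enum values added to dc.h earlier in the series; it is not a verbatim copy of the function.

/* Simplified sketch of the single-eDP z-state decision after this patch. */
static enum dcn_zstate_support_state zstate_decision_sketch(bool is_pwrseq0,
		bool allow_z8, bool psr1_enabled, int plane_count,
		double stutter_period_us, unsigned int min_dst_y_next_start_us)
{
	/* Multi-plane configurations get no z-state support at all. */
	if (plane_count > 1)
		return DCN_ZSTATE_SUPPORT_DISALLOW;

	/* Long stutter periods on PWRSEQ0 allow every z-state. */
	if (is_pwrseq0 && (stutter_period_us > 5000.0 || min_dst_y_next_start_us > 5000))
		return DCN_ZSTATE_SUPPORT_ALLOW;

	/* PSR1 panels on PWRSEQ0 may use Z10, plus Z8 if residency permits. */
	if (is_pwrseq0 && psr1_enabled)
		return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY :
				  DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;

	/* Otherwise Z8 is the only candidate, still gated by residency. */
	return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
}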
--- /dev/null
+From 8c0b41f54c2d4d0979de65861db346207ccb5b6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Apr 2023 10:49:38 -0400
+Subject: drm/amd/display: Lowering min Z8 residency time
+
+From: Leo Chen <sancchen@amd.com>
+
+[ Upstream commit d893f39320e1248d1c97fde0d6e51e5ea008a76b ]
+
+[Why & How]
+Per HW team request, we're lowering the minimum Z8
+residency time to 2000us. This enables Z8 support for additional
+modes we were previously blocking, such as 2k above 60Hz.
+
+Cc: stable@vger.kernel.org
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Leo Chen <sancchen@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index ca3aabdf81d2f..b7782433ce6ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,7 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
+- .minimum_z8_residency_time = 3080,
++ .minimum_z8_residency_time = 2000,
+ .psr_skip_crtc_disable = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+--
+2.39.2
+
--- /dev/null
+From 10a1546468d406be962f9d1dc720dd52dda4ca87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 11:46:36 -0400
+Subject: drm/amd/display: Refactor eDP PSR codes
+
+From: Ian Chen <ian.chen@amd.com>
+
+[ Upstream commit bd829d5707730072fecc3267016a675a4789905b ]
+
+We split out PSR config from "global" to "per-panel" config settings.
+
+Tested-by: Mark Broadworth <mark.broadworth@amd.com>
+Reviewed-by: Robin Chen <robin.chen@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Ian Chen <ian.chen@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dc.h | 1 -
+ drivers/gpu/drm/amd/display/dc/dc_link.h | 14 +++++++++++---
+ .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 5 ++++-
+ .../gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 15 +++++++++++++--
+ .../drm/amd/display/dc/dcn302/dcn302_resource.c | 14 +++++++++++++-
+ .../drm/amd/display/dc/dcn303/dcn303_resource.c | 13 ++++++++++++-
+ .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 4 ++++
+ .../drm/amd/display/dc/dcn314/dcn314_resource.c | 4 ++++
+ .../drm/amd/display/dc/dcn315/dcn315_resource.c | 4 ++++
+ .../drm/amd/display/dc/dcn316/dcn316_resource.c | 4 ++++
+ .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 2 +-
+ 11 files changed, 70 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 0598465fd1a1b..8757d7ff8ff62 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -764,7 +764,6 @@ struct dc_debug_options {
+ bool disable_mem_low_power;
+ bool pstate_enabled;
+ bool disable_dmcu;
+- bool disable_psr;
+ bool force_abm_enable;
+ bool disable_stereo_support;
+ bool vsr_support;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index caf0c7af2d0b9..17f080f8af6cd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -117,7 +117,7 @@ struct psr_settings {
+ * Add a struct dc_panel_config under dc_link
+ */
+ struct dc_panel_config {
+- // extra panel power sequence parameters
++ /* extra panel power sequence parameters */
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+@@ -127,13 +127,21 @@ struct dc_panel_config {
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+- // ABM
++ /* PSR */
++ struct psr {
++ bool disable_psr;
++ bool disallow_psrsu;
++ bool rc_disable;
++ bool rc_allow_static_screen;
++ bool rc_allow_fullscreen_VPB;
++ } psr;
++ /* ABM */
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+- // edp DSC
++ /* edp DSC */
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 887081472c0d8..ce6c70e25703d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -671,12 +671,15 @@ static const struct dc_debug_options debug_defaults_diags = {
+ .disable_pplib_wm_range = true,
+ .disable_stutter = true,
+ .disable_48mhz_pwrdwn = true,
+- .disable_psr = true,
+ .enable_tri_buf = true,
+ .use_max_lb = true
+ };
+
+ static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index e958f838c8041..5a8d1a0513149 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -723,7 +723,6 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable,
+ .dmub_command_table = true,
+- .disable_psr = false,
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
+ };
+@@ -742,11 +741,17 @@ static const struct dc_debug_options debug_defaults_diags = {
+ .scl_reset_length10 = true,
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+- .disable_psr = true,
+ .enable_tri_buf = true,
+ .use_max_lb = true
+ };
+
++static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
++};
++
+ static void dcn30_dpp_destroy(struct dpp **dpp)
+ {
+ kfree(TO_DCN20_DPP(*dpp));
+@@ -2214,6 +2219,11 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
+ }
+ }
+
++static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++ *panel_config = panel_config_defaults;
++}
++
+ static const struct resource_funcs dcn30_res_pool_funcs = {
+ .destroy = dcn30_destroy_resource_pool,
+ .link_enc_create = dcn30_link_encoder_create,
+@@ -2233,6 +2243,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn30_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++ .get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ };
+
+ #define CTX ctx
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index b925b6ddde5a3..d3945876aceda 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -112,10 +112,16 @@ static const struct dc_debug_options debug_defaults_diags = {
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+ .enable_tri_buf = true,
+- .disable_psr = true,
+ .use_max_lb = true
+ };
+
++static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
++};
++
+ enum dcn302_clk_src_array_id {
+ DCN302_CLK_SRC_PLL0,
+ DCN302_CLK_SRC_PLL1,
+@@ -1132,6 +1138,11 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
+ DC_FP_END();
+ }
+
++static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++ *panel_config = panel_config_defaults;
++}
++
+ static struct resource_funcs dcn302_res_pool_funcs = {
+ .destroy = dcn302_destroy_resource_pool,
+ .link_enc_create = dcn302_link_encoder_create,
+@@ -1151,6 +1162,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn302_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++ .get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ };
+
+ static struct dc_cap_funcs cap_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+index 527d5c9028785..7e7f18bef0986 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+@@ -96,7 +96,13 @@ static const struct dc_debug_options debug_defaults_diags = {
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+ .enable_tri_buf = true,
+- .disable_psr = true,
++};
++
++static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ };
+
+ enum dcn303_clk_src_array_id {
+@@ -1055,6 +1061,10 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool)
+ *pool = NULL;
+ }
+
++static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++ *panel_config = panel_config_defaults;
++}
+
+ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+ {
+@@ -1082,6 +1092,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn303_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++ .get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ };
+
+ static struct dc_cap_funcs cap_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index d825f11b4feaa..d3f76512841b4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -911,6 +911,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+
+ static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index ffaa4e5b3fca0..94a90c8f3abbe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -940,6 +940,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+
+ static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+index 58746c437554f..31cbc5762eab3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+@@ -907,6 +907,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+
+ static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+index 6b40a11ac83a9..af3eddc0cf32e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+@@ -906,6 +906,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+
+ static const struct dc_panel_config panel_config_defaults = {
++ .psr = {
++ .disable_psr = false,
++ .disallow_psrsu = false,
++ },
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 45db40c41882c..602e885ed52c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -989,7 +989,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+
+ if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+- else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
++ else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+--
+2.39.2
+
--- /dev/null
+From 1440b85be64440cdb3483402c6969aaba05e011c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Feb 2023 10:27:10 -0500
+Subject: drm/amd/display: Update minimum stutter residency for DCN314 Z8
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 0215ce9057edf69aff9c1a32f4254e1ec297db31 ]
+
+[Why]
+Block periods that are too short, as they currently have the potential to
+cause hangs in other firmware components on the system.
+
+[How]
+Update the threshold, mostly targeting the blocking of 4k and downscaling cases.
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: d893f39320e1 ("drm/amd/display: Lowering min Z8 residency time")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 67c892b9e2cf5..ca3aabdf81d2f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,7 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
+- .minimum_z8_residency_time = 1000,
++ .minimum_z8_residency_time = 3080,
+ .psr_skip_crtc_disable = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+--
+2.39.2
+
--- /dev/null
+From 4a60b0073f2ab65b012cca8f093af21ec411f8ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Feb 2023 13:38:05 -0500
+Subject: drm/amd/display: Update Z8 SR exit/enter latencies
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 9b0f51e8449f6f76170fda6a8dd9c417a43ce270 ]
+
+[Why]
+Request from HW team to update the latencies to the new measured values.
+
+[How]
+Update the values in the bounding box.
+
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 8f586cc16c1f ("drm/amd/display: Change default Z8 watermark values")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 9214ce23c3bd3..2c99193b63fa6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -148,8 +148,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ .num_states = 5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
+- .sr_exit_z8_time_us = 280.0,
+- .sr_enter_plus_exit_z8_time_us = 350.0,
++ .sr_exit_z8_time_us = 210.0,
++ .sr_enter_plus_exit_z8_time_us = 310.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+--
+2.39.2
+
--- /dev/null
+From ee647b5e2489d8476e5d1ffaa8d6377883668eb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Nov 2022 11:35:25 -0500
+Subject: drm/amd/display: Update Z8 watermarks for DCN314
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit fa24e116f1ce3dcc55474f0b6ab0cac4e3ee34e1 ]
+
+[Why & How]
+Update from the HW team: we need to lower the watermarks for the
+enter and enter+exit latencies.
+
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Brian Chang <Brian.Chang@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 8f586cc16c1f ("drm/amd/display: Change default Z8 watermark values")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 34b6c763a4554..9214ce23c3bd3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -148,8 +148,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ .num_states = 5,
+ .sr_exit_time_us = 16.5,
+ .sr_enter_plus_exit_time_us = 18.5,
+- .sr_exit_z8_time_us = 442.0,
+- .sr_enter_plus_exit_z8_time_us = 560.0,
++ .sr_exit_z8_time_us = 280.0,
++ .sr_enter_plus_exit_z8_time_us = 350.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+--
+2.39.2
+
--- /dev/null
+From 46276d09550c0d750dcb46c363c490441296677a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Apr 2023 16:46:14 +0300
+Subject: drm/dsc: fix drm_edp_dsc_sink_output_bpp() DPCD high byte usage
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit 13525645e2246ebc8a21bd656248d86022a6ee8f ]
+
+The operator precedence between << and & is wrong, leading to the high
+byte being completely ignored. For example, with the 6.4 format, 32
+becomes 0 and 24 becomes 8. Fix it, and remove the slightly confusing
+and unnecessary DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT macro while at it.
+
+Fixes: 0575650077ea ("drm/dp: DRM DP helper/macros to get DP sink DSC parameters")
+Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Cc: Manasi Navare <navaremanasi@google.com>
+Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230406134615.1422509-1-jani.nikula@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/drm/display/drm_dp.h | 1 -
+ include/drm/display/drm_dp_helper.h | 5 ++---
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index 9bc22a02874d9..50428ba92ce8b 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -286,7 +286,6 @@
+
+ #define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
+ # define DP_DSC_MAX_BPP_DELTA_VERSION_MASK 0x06
+ # define DP_DSC_MAX_BPP_DELTA_AVAILABILITY 0x08
+
+diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
+index ab55453f2d2cd..ade9df59e156a 100644
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -181,9 +181,8 @@ static inline u16
+ drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+ {
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+- (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+- DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+- DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
++ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
++ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
+ }
+
+ static inline u32
+--
+2.39.2
+
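The precedence bug is easy to reproduce outside the kernel: '<<' binds tighter than '&', so the unparenthesized expression masks the 8-bit high byte against 0x300 and always yields zero. A standalone worked example using the 6.4 fixed-point values from the commit message (HI_MASK stands in for DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK):

#include <stdint.h>
#include <stdio.h>

#define HI_MASK 0x3

/* Buggy: parsed as lo | (hi & (HI_MASK << 8)), so the high byte is dropped. */
static unsigned int output_bpp_buggy(uint8_t lo, uint8_t hi)
{
	return lo | (hi & HI_MASK << 8);
}

/* Fixed: mask the high byte first, then shift it into bits 9:8. */
static unsigned int output_bpp_fixed(uint8_t lo, uint8_t hi)
{
	return lo | ((hi & HI_MASK) << 8);
}

int main(void)
{
	/* 32 bpp in 6.4 format is 512 (lo 0x00, hi 0x02); 24 bpp is 384 (lo 0x80, hi 0x01). */
	printf("32 bpp: buggy %u/16, fixed %u/16\n",
	       output_bpp_buggy(0x00, 0x02), output_bpp_fixed(0x00, 0x02));
	printf("24 bpp: buggy %u/16, fixed %u/16\n",
	       output_bpp_buggy(0x80, 0x01), output_bpp_fixed(0x80, 0x01));
	return 0;
}

The buggy variant prints 0/16 and 128/16 (0 and 8 bpp), matching the values quoted in the commit message; the fixed variant prints 512/16 and 384/16 (32 and 24 bpp).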
--- /dev/null
+From 7758f6c67eb2c62a59d8d4576bc16186b1fbed84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Apr 2023 20:55:14 +0300
+Subject: drm/i915: Check pipe source size when using skl+ scalers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit d944eafed618a8507270b324ad9d5405bb7f0b3e ]
+
+The skl+ scalers only sample 12 bits of PIPESRC so we can't
+do any plane scaling at all when the pipe source size is >4k.
+
+Make sure the pipe source size is also below the scaler's src
+size limits. Might not be 100% accurate, but should at least be
+safe. We can refine the limits later if we discover that recent
+hw is less restricted.
+
+Cc: stable@vger.kernel.org
+Tested-by: Ross Zwisler <zwisler@google.com>
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8357
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230418175528.13117-2-ville.syrjala@linux.intel.com
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+(cherry picked from commit 691248d4135fe3fae64b4ee0676bc96a7fd6950c)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/skl_scaler.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index e6ec5ed0d00ec..90f42f63128ec 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -105,6 +105,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
++ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
++ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int min_src_w, min_src_h, min_dst_w, min_dst_h;
+ int max_src_w, max_src_h, max_dst_w, max_dst_h;
+
+@@ -196,6 +198,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ return -EINVAL;
+ }
+
++ /*
++ * The pipe scaler does not use all the bits of PIPESRC, at least
++ * on the earlier platforms. So even when we're scaling a plane
++ * the *pipe* source size must not be too large. For simplicity
++ * we assume the limits match the scaler source size limits. Might
++ * not be 100% accurate on all platforms, but good enough for now.
++ */
++ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
++ drm_dbg_kms(&dev_priv->drm,
++ "scaler_user index %u.%u: pipe src size %ux%u "
++ "is out of scaler range\n",
++ crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
++ return -EINVAL;
++ }
++
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+--
+2.39.2
+
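The underlying limitation is that the scaler samples only the low 12 bits of the programmed pipe source size, so any dimension above 4095 wraps around. A standalone illustration (the exact wrap behaviour is an assumption drawn from the commit message, not from documentation):

#include <stdio.h>

int main(void)
{
	unsigned int pipe_src_w = 5120;			/* e.g. a 5K-wide pipe source */
	unsigned int sampled_w = pipe_src_w & 0xfff;	/* only 12 bits are sampled */

	/* Prints: programmed 5120, scaler samples 1024 */
	printf("programmed %u, scaler samples %u\n", pipe_src_w, sampled_w);
	return 0;
}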
--- /dev/null
+From 5452bf511c688a3f910fbb963970587fdb1fb6a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Dec 2022 15:05:09 +0200
+Subject: drm/i915/mtl: update scaler source and destination limits for MTL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Animesh Manna <animesh.manna@intel.com>
+
+[ Upstream commit f840834a8b60ffd305f03a53007605ba4dfbbc4b ]
+
+The max source and destination limits for scalers in MTL have changed.
+Use the new values accordingly.
+
+Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
+Signed-off-by: Animesh Manna <animesh.manna@intel.com>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
+Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221223130509.43245-3-luciano.coelho@intel.com
+Stable-dep-of: d944eafed618 ("drm/i915: Check pipe source size when using skl+ scalers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/skl_scaler.c | 40 ++++++++++++++++++-----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index 4092679be21ec..e6ec5ed0d00ec 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -85,6 +85,10 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+ #define ICL_MAX_SRC_H 4096
+ #define ICL_MAX_DST_W 5120
+ #define ICL_MAX_DST_H 4096
++#define MTL_MAX_SRC_W 4096
++#define MTL_MAX_SRC_H 8192
++#define MTL_MAX_DST_W 8192
++#define MTL_MAX_DST_H 8192
+ #define SKL_MIN_YUV_420_SRC_W 16
+ #define SKL_MIN_YUV_420_SRC_H 16
+
+@@ -101,6 +105,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
++ int min_src_w, min_src_h, min_dst_w, min_dst_h;
++ int max_src_w, max_src_h, max_dst_w, max_dst_h;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+@@ -155,15 +161,33 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ return -EINVAL;
+ }
+
++ min_src_w = SKL_MIN_SRC_W;
++ min_src_h = SKL_MIN_SRC_H;
++ min_dst_w = SKL_MIN_DST_W;
++ min_dst_h = SKL_MIN_DST_H;
++
++ if (DISPLAY_VER(dev_priv) < 11) {
++ max_src_w = SKL_MAX_SRC_W;
++ max_src_h = SKL_MAX_SRC_H;
++ max_dst_w = SKL_MAX_DST_W;
++ max_dst_h = SKL_MAX_DST_H;
++ } else if (DISPLAY_VER(dev_priv) < 14) {
++ max_src_w = ICL_MAX_SRC_W;
++ max_src_h = ICL_MAX_SRC_H;
++ max_dst_w = ICL_MAX_DST_W;
++ max_dst_h = ICL_MAX_DST_H;
++ } else {
++ max_src_w = MTL_MAX_SRC_W;
++ max_src_h = MTL_MAX_SRC_H;
++ max_dst_w = MTL_MAX_DST_W;
++ max_dst_h = MTL_MAX_DST_H;
++ }
++
+ /* range checks */
+- if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+- (DISPLAY_VER(dev_priv) >= 11 &&
+- (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+- dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+- (DISPLAY_VER(dev_priv) < 11 &&
+- (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
++ if (src_w < min_src_w || src_h < min_src_h ||
++ dst_w < min_dst_w || dst_h < min_dst_h ||
++ src_w > max_src_w || src_h > max_src_h ||
++ dst_w > max_dst_w || dst_h > max_dst_h) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+--
+2.39.2
+
--- /dev/null
+From 966e633be4db8f49c40132061c985a3f7c2dfd10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Nov 2022 11:30:40 -0800
+Subject: drm/msm/adreno: Simplify read64/write64 helpers
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit cade05b2a88558847984287dd389fae0c7de31d6 ]
+
+The _HI reg always follows the _LO reg, so there is no need to pass these
+offsets separately.
+
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/511581/
+Link: https://lore.kernel.org/r/20221114193049.1533391-2-robdclark@gmail.com
+Stable-dep-of: ca090c837b43 ("drm/msm: fix missing wq allocation error handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
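A minimal sketch of the helper shape this commit moves to, combining the _LO register with the adjacent _HI register from a single offset. This is illustration only (the callback type and register indexing are assumptions), not the actual msm_gpu.h implementation:

/* Illustration only: read a 64-bit value split across two consecutive
 * 32-bit registers, given just the _LO register index. */
static inline u64 read64_pair(u32 (*read32)(u32 reg), u32 lo_reg)
{
	u64 lo = read32(lo_reg);
	u64 hi = read32(lo_reg + 1);	/* the _HI register immediately follows _LO */

	return lo | (hi << 32);
}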
+---
+ drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 3 +--
+ drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27 ++++++++-------------
+ drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 4 +--
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 24 ++++++------------
+ drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 3 +--
+ drivers/gpu/drm/msm/msm_gpu.h | 12 ++++-----
+ 6 files changed, 27 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+index 7cb8d9849c073..a10feb8a4194a 100644
+--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+@@ -606,8 +606,7 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
+
+ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+- *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+- REG_A4XX_RBBM_PERFCTR_CP_0_HI);
++ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 02ff306f96f42..24feae285ccd6 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -605,11 +605,9 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
+ a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
+ }
+
+- gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+- REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
++ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+
+- gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+- REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
++ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+
+ return 0;
+ }
+@@ -868,8 +866,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+- gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+- REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
++ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+@@ -908,8 +905,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ return ret;
+
+ /* Set the ringbuffer address */
+- gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+- gpu->rb[0]->iova);
++ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /*
+ * If the microcode supports the WHERE_AM_I opcode then we can use that
+@@ -936,7 +932,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+- REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
++ shadowptr(a5xx_gpu, gpu->rb[0]));
+ } else if (gpu->nr_rings > 1) {
+ /* Disable preemption if WHERE_AM_I isn't available */
+ a5xx_preempt_fini(gpu);
+@@ -1239,9 +1235,9 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+- gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
++ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+- gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
++ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+@@ -1427,8 +1423,7 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
+
+ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+- *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+- REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
++ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
+
+ return 0;
+ }
+@@ -1465,8 +1460,7 @@ static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+- gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+- REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
++ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+@@ -1666,8 +1660,7 @@ static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+ {
+ u64 busy_cycles;
+
+- busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+- REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
++ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
+
+ return busy_cycles;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index e0eef47dae632..f58dd564d122b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -137,7 +137,6 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+- REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+@@ -212,8 +211,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+- gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+- REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
++ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 9d7fc44c1e2a9..dc53466864b05 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -247,8 +247,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ OUT_RING(ring, submit->seqno);
+
+ trace_msm_gpu_submit_flush(submit,
+- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+- REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
++ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO));
+
+ a6xx_flush(gpu, ring);
+ }
+@@ -947,8 +946,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
+ }
+ }
+
+- gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
+- REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
++ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+
+ return 0;
+ }
+@@ -999,8 +997,7 @@ static int hw_init(struct msm_gpu *gpu)
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+- gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+- REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
++ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Turn on 64 bit addressing for all blocks */
+@@ -1049,11 +1046,9 @@ static int hw_init(struct msm_gpu *gpu)
+
+ if (!adreno_is_a650_family(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+- REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
++ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+- REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ }
+
+@@ -1145,8 +1140,7 @@ static int hw_init(struct msm_gpu *gpu)
+ goto out;
+
+ /* Set the ringbuffer address */
+- gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+- gpu->rb[0]->iova);
++ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /* Targets that support extended APRIV can use the RPTR shadow from
+ * hardware but all the other ones need to disable the feature. Targets
+@@ -1178,7 +1172,6 @@ static int hw_init(struct msm_gpu *gpu)
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+- REG_A6XX_CP_RB_RPTR_ADDR_HI,
+ shadowptr(a6xx_gpu, gpu->rb[0]));
+ }
+
+@@ -1506,9 +1499,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+- gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
++ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+- gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
++ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+@@ -1719,8 +1712,7 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+- *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+- REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
++ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index a5c3d1ed255a6..a023d5f962dce 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -147,8 +147,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+- gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+- REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
++ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index a89bfdc3d7f90..7a36e0784f067 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -548,7 +548,7 @@ static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+ msm_rmw(gpu->mmio + (reg << 2), mask, or);
+ }
+
+-static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
++static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
+ {
+ u64 val;
+
+@@ -566,17 +566,17 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+- val = (u64) msm_readl(gpu->mmio + (lo << 2));
+- val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
++ val = (u64) msm_readl(gpu->mmio + (reg << 2));
++ val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
+
+ return val;
+ }
+
+-static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
++static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
+ {
+ /* Why not a writeq here? Read the screed above */
+- msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+- msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
++ msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
++ msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
+ }
+
+ int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+--
+2.39.2
+
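For reference, a minimal user-space sketch (not from the kernel tree; the array and helper names below are invented) of the register convention the consolidated gpu_read64()/gpu_write64() helpers rely on: each 64-bit value lives in two consecutive 32-bit registers, with the high half at reg + 1, and the low word is read first.

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated 32-bit register file; the index is the register offset in dwords. */
    static uint32_t mmio[16];

    static void write64(uint32_t reg, uint64_t val)
    {
            /* low word at 'reg', high word at the next register */
            mmio[reg] = (uint32_t)val;
            mmio[reg + 1] = (uint32_t)(val >> 32);
    }

    static uint64_t read64(uint32_t reg)
    {
            /* read the low word first, then OR in the high word */
            uint64_t val = mmio[reg];

            val |= (uint64_t)mmio[reg + 1] << 32;
            return val;
    }

    int main(void)
    {
            write64(4, 0x123456789abcdef0ULL);
            printf("0x%llx\n", (unsigned long long)read64(4));
            return 0;
    }

Running the sketch prints the value that was written, confirming the reassembly order the helpers assume.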
--- /dev/null
+From 511da2dab75151da9e8552f56a638e67c9645cf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Mar 2023 11:07:19 +0100
+Subject: drm/msm: fix missing wq allocation error handling
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit ca090c837b430752038b24e56dd182010d77f6f6 ]
+
+Add the missing sanity check to handle workqueue allocation failures.
+
+Fixes: c8afe684c95c ("drm/msm: basic KMS driver for snapdragon")
+Cc: stable@vger.kernel.org # 3.12
+Cc: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/525102/
+Link: https://lore.kernel.org/r/20230306100722.28485-8-johan+linaro@kernel.org
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_drv.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 5eeb4655fbf17..ac3d1d492a48c 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -433,6 +433,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ priv->dev = ddev;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto err_put_dev;
++ }
+
+ INIT_LIST_HEAD(&priv->objects);
+ mutex_init(&priv->obj_lock);
+--
+2.39.2
+
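The fix above follows the usual allocate-check-unwind idiom; a generic user-space sketch of that pattern (the structure and label names are invented, and malloc() merely stands in for alloc_ordered_workqueue()):

    #include <stdlib.h>
    #include <errno.h>

    struct ctx {
            void *dev;
            void *wq;
    };

    static int ctx_init(struct ctx *c)
    {
            int ret;

            c->dev = malloc(64);
            if (!c->dev)
                    return -ENOMEM;

            c->wq = malloc(64);     /* stands in for alloc_ordered_workqueue() */
            if (!c->wq) {
                    ret = -ENOMEM;
                    goto err_free_dev;      /* unwind what was set up before the failure */
            }

            return 0;

    err_free_dev:
            free(c->dev);
            return ret;
    }

    int main(void)
    {
            struct ctx c = { 0 };

            if (ctx_init(&c))
                    return 1;

            free(c.wq);
            free(c.dev);
            return 0;
    }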
--- /dev/null
+From dd4dd958f0d6c5649f709a5c4eb3c8aeb29d869c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Nov 2022 11:30:41 -0800
+Subject: drm/msm: Hangcheck progress detection
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit d73b1d02de0858b96f743e1e8b767fb092ae4c1b ]
+
+If the hangcheck timer expires, check if the fw's position in the
+cmdstream has advanced (changed) since last timer expiration, and
+allow it up to three additional "extensions" to its allotted time.
+The intention is to continue to catch "shader stuck in a loop" type
+hangs quickly, but allow more time for things that are actually
+making forward progress.
+
+Because we need to sample the CP state twice to detect if there has
+not been progress, this also cuts the timer's duration in half.
+
+v2: Fix typo (REG_A6XX_CP_CSQ_IB2_STAT), add comment
+v3: Only halve hangcheck timer duration for generations which
+ support progress detection (hdanton); removed unused a5xx
+ progress (without knowing how to adjust for data buffered
+ in ROQ it is too likely to report a false negative)
+v4: Comment updates to better describe the total hangcheck
+ duration when progress detection is applied
+
+Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
+Tested-by: Chia-I Wu <olvaffe@gmail.com> # dEQP-GLES2.functional.flush_finish.wait
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/511584/
+Link: https://lore.kernel.org/r/20221114193049.1533391-3-robdclark@gmail.com
+Stable-dep-of: ca090c837b43 ("drm/msm: fix missing wq allocation error handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 34 +++++++++++++++++++++++++++
+ drivers/gpu/drm/msm/msm_drv.c | 1 -
+ drivers/gpu/drm/msm/msm_drv.h | 8 ++++++-
+ drivers/gpu/drm/msm/msm_gpu.c | 31 +++++++++++++++++++++++-
+ drivers/gpu/drm/msm/msm_gpu.h | 10 ++++++++
+ drivers/gpu/drm/msm/msm_ringbuffer.h | 28 ++++++++++++++++++++++
+ 6 files changed, 109 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index dc53466864b05..95e73eddc5e91 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1850,6 +1850,39 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+ }
+
++static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
++{
++ struct msm_cp_state cp_state = {
++ .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
++ .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
++ .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
++ .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
++ };
++ bool progress;
++
++ /*
++ * Adjust the remaining data to account for what has already been
++ * fetched from memory, but not yet consumed by the SQE.
++ *
++ * This is not *technically* correct, the amount buffered could
++ * exceed the IB size due to hw prefetching ahead, but:
++ *
++ * (1) We aren't trying to find the exact position, just whether
++ * progress has been made
++ * (2) The CP_REG_TO_MEM at the end of a submit should be enough
++ * to prevent prefetching into an unrelated submit. (And
++ * either way, at some point the ROQ will be full.)
++ */
++ cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB1_STAT) >> 16;
++ cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB2_STAT) >> 16;
++
++ progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
++
++ ring->last_cp_state = cp_state;
++
++ return progress;
++}
++
+ static u32 a618_get_speed_bin(u32 fuse)
+ {
+ if (fuse == 0)
+@@ -1966,6 +1999,7 @@ static const struct adreno_gpu_funcs funcs = {
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
++ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_get_timestamp,
+ };
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 69bc2e5b4aaeb..5eeb4655fbf17 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -433,7 +433,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ priv->dev = ddev;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+- priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
+
+ INIT_LIST_HEAD(&priv->objects);
+ mutex_init(&priv->obj_lock);
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index b2ea262296a4f..d4e0ef608950e 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -224,7 +224,13 @@ struct msm_drm_private {
+
+ struct drm_atomic_state *pm_state;
+
+- /* For hang detection, in ms */
++ /**
++ * hangcheck_period: For hang detection, in ms
++ *
++ * Note that in practice, a submit/job will get at least two hangcheck
++ * periods, due to checking for progress being implemented as simply
++ * "have the CP position registers changed since last time?"
++ */
+ unsigned int hangcheck_period;
+
+ /**
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 4f495eecc34ba..3802495003258 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -494,6 +494,21 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
+ round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
+ }
+
++static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
++{
++ if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
++ return false;
++
++ if (!gpu->funcs->progress)
++ return false;
++
++ if (!gpu->funcs->progress(gpu, ring))
++ return false;
++
++ ring->hangcheck_progress_retries++;
++ return true;
++}
++
+ static void hangcheck_handler(struct timer_list *t)
+ {
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+@@ -504,9 +519,12 @@ static void hangcheck_handler(struct timer_list *t)
+ if (fence != ring->hangcheck_fence) {
+ /* some progress has been made.. ya! */
+ ring->hangcheck_fence = fence;
+- } else if (fence_before(fence, ring->fctx->last_fence)) {
++ ring->hangcheck_progress_retries = 0;
++ } else if (fence_before(fence, ring->fctx->last_fence) &&
++ !made_progress(gpu, ring)) {
+ /* no progress and not done.. hung! */
+ ring->hangcheck_fence = fence;
++ ring->hangcheck_progress_retries = 0;
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
+@@ -832,6 +850,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, struct msm_gpu_config *config)
+ {
++ struct msm_drm_private *priv = drm->dev_private;
+ int i, ret, nr_rings = config->nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
+@@ -859,6 +878,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ kthread_init_work(&gpu->recover_work, recover_worker);
+ kthread_init_work(&gpu->fault_work, fault_worker);
+
++ priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
++
++ /*
++ * If progress detection is supported, halve the hangcheck timer
++ * duration, as it takes two iterations of the hangcheck handler
++ * to detect a hang.
++ */
++ if (funcs->progress)
++ priv->hangcheck_period /= 2;
++
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
+
+ spin_lock_init(&gpu->perf_lock);
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index 7a36e0784f067..732295e256834 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -78,6 +78,15 @@ struct msm_gpu_funcs {
+ struct msm_gem_address_space *(*create_private_address_space)
+ (struct msm_gpu *gpu);
+ uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
++
++ /**
++ * progress: Has the GPU made progress?
++ *
++ * Return true if GPU position in cmdstream has advanced (or changed)
++ * since the last call. To avoid false negatives, this should account
++ * for cmdstream that is buffered in this FIFO upstream of the CP fw.
++ */
++ bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ };
+
+ /* Additional state for iommu faults: */
+@@ -237,6 +246,7 @@ struct msm_gpu {
+ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
+
+ #define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
++#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
+ struct timer_list hangcheck_timer;
+
+ /* Fault info for most recent iova fault: */
+diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
+index 2a5045abe46e8..698b333abccd6 100644
+--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
++++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
+@@ -35,6 +35,11 @@ struct msm_rbmemptrs {
+ volatile u64 ttbr0;
+ };
+
++struct msm_cp_state {
++ uint64_t ib1_base, ib2_base;
++ uint32_t ib1_rem, ib2_rem;
++};
++
+ struct msm_ringbuffer {
+ struct msm_gpu *gpu;
+ int id;
+@@ -64,6 +69,29 @@ struct msm_ringbuffer {
+ uint64_t memptrs_iova;
+ struct msm_fence_context *fctx;
+
++ /**
++ * hangcheck_progress_retries:
++ *
++ * The number of extra hangcheck duration cycles that we have given
++ * due to it appearing that the GPU is making forward progress.
++ *
++ * For GPU generations which support progress detection (see.
++ * msm_gpu_funcs::progress()), if the GPU appears to be making progress
++ * (ie. the CP has advanced in the command stream, we'll allow up to
++ * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
++ * before killing the job. But to detect progress we need two sample
++ * points, so the duration of the hangcheck timer is halved. In other
++ * words we'll let the submit run for up to:
++ *
++ * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
++ */
++ int hangcheck_progress_retries;
++
++ /**
++ * last_cp_state: The state of the CP at the last call to gpu->progress()
++ */
++ struct msm_cp_state last_cp_state;
++
+ /*
+ * preempt_lock protects preemption and serializes wptr updates against
+ * preemption. Can be aquired from irq context.
+--
+2.39.2
+
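To make the timing described above concrete, here is a small stand-alone sketch (not driver code; only the two constants are taken from the patch) of the worst-case accounting: with progress detection the period is halved, a submit that keeps advancing can earn up to three extra expirations, and a truly stuck shader is still caught after two samples with unchanged CP state.

    #include <stdio.h>

    #define HANGCHECK_DEFAULT_PERIOD        500     /* ms, value from the patch */
    #define HANGCHECK_PROGRESS_RETRIES      3       /* value from the patch */

    int main(void)
    {
            /* progress detection needs two samples, so the period is halved */
            int period = HANGCHECK_DEFAULT_PERIOD / 2;

            /* worst case: the CP keeps advancing, so every expiration grants a retry */
            int max_ms = period * (HANGCHECK_PROGRESS_RETRIES + 1);

            /* a shader stuck in a loop shows no CP movement between two samples */
            int stuck_ms = 2 * period;

            printf("progressing submit is killed after up to %d ms\n", max_ms);       /* 1000 */
            printf("stuck submit is still caught within roughly %d ms\n", stuck_ms);  /* 500 */
            return 0;
    }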
--- /dev/null
+From 75ddc5101756babfc54ccc19af20f0f4815213f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Dec 2022 13:51:09 -0800
+Subject: f2fs: allocate the extent_cache by default
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 72840cccc0a1a0a0dc1bb27b669a9111be6d0f6a ]
+
+Let's allocate it to remove the runtime complexity.
+
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 043d2d00b443 ("f2fs: factor out victim_entry usage from general rb_tree use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/extent_cache.c | 38 +++++++++++++++++++-------------------
+ fs/f2fs/f2fs.h | 3 ++-
+ fs/f2fs/inode.c | 6 ++++--
+ fs/f2fs/namei.c | 4 ++--
+ 4 files changed, 27 insertions(+), 24 deletions(-)
+
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 4217076df1024..794a8134687ae 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -47,20 +47,23 @@ static bool __may_read_extent_tree(struct inode *inode)
+ return S_ISREG(inode->i_mode);
+ }
+
+-static bool __may_extent_tree(struct inode *inode, enum extent_type type)
++static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ if (type == EX_READ)
++ return __may_read_extent_tree(inode);
++ return false;
++}
+
++static bool __may_extent_tree(struct inode *inode, enum extent_type type)
++{
+ /*
+ * for recovered files during mount do not create extents
+ * if shrinker is not registered.
+ */
+- if (list_empty(&sbi->s_list))
++ if (list_empty(&F2FS_I_SB(inode)->s_list))
+ return false;
+
+- if (type == EX_READ)
+- return __may_read_extent_tree(inode);
+- return false;
++ return __init_may_extent_tree(inode, type);
+ }
+
+ static void __try_update_largest_extent(struct extent_tree *et,
+@@ -439,20 +442,18 @@ static void __drop_largest_extent(struct extent_tree *et,
+ }
+ }
+
+-/* return true, if inode page is changed */
+-static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage,
+- enum extent_type type)
++void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree_info *eti = &sbi->extent_tree[type];
+- struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
++ struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
++ struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ struct extent_tree *et;
+ struct extent_node *en;
+ struct extent_info ei;
+
+- if (!__may_extent_tree(inode, type)) {
++ if (!__may_extent_tree(inode, EX_READ)) {
+ /* drop largest read extent */
+- if (type == EX_READ && i_ext && i_ext->len) {
++ if (i_ext && i_ext->len) {
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ i_ext->len = 0;
+ set_page_dirty(ipage);
+@@ -460,13 +461,11 @@ static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage,
+ goto out;
+ }
+
+- et = __grab_extent_tree(inode, type);
++ et = __grab_extent_tree(inode, EX_READ);
+
+ if (!i_ext || !i_ext->len)
+ goto out;
+
+- BUG_ON(type != EX_READ);
+-
+ get_read_extent_info(&ei, i_ext);
+
+ write_lock(&et->lock);
+@@ -486,14 +485,15 @@ static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage,
+ unlock_out:
+ write_unlock(&et->lock);
+ out:
+- if (type == EX_READ && !F2FS_I(inode)->extent_tree[EX_READ])
++ if (!F2FS_I(inode)->extent_tree[EX_READ])
+ set_inode_flag(inode, FI_NO_EXTENT);
+ }
+
+-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
++void f2fs_init_extent_tree(struct inode *inode)
+ {
+ /* initialize read cache */
+- __f2fs_init_extent_tree(inode, ipage, EX_READ);
++ if (__init_may_extent_tree(inode, EX_READ))
++ __grab_extent_tree(inode, EX_READ);
+ }
+
+ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index cf45af3a44a7a..3fc9d98112166 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4147,7 +4147,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ bool force, bool *leftmost);
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root_cached *root, bool check_key);
+-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
++void f2fs_init_extent_tree(struct inode *inode);
+ void f2fs_drop_extent_tree(struct inode *inode);
+ void f2fs_destroy_extent_node(struct inode *inode);
+ void f2fs_destroy_extent_tree(struct inode *inode);
+@@ -4156,6 +4156,7 @@ int __init f2fs_create_extent_cache(void);
+ void f2fs_destroy_extent_cache(void);
+
+ /* read extent cache ops */
++void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
+ bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei);
+ void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 7bfe29626024d..2bda4e73fc1ce 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -392,8 +392,6 @@ static int do_read_inode(struct inode *inode)
+ fi->i_pino = le32_to_cpu(ri->i_pino);
+ fi->i_dir_level = ri->i_dir_level;
+
+- f2fs_init_extent_tree(inode, node_page);
+-
+ get_inline_info(inode, ri);
+
+ fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
+@@ -479,6 +477,10 @@ static int do_read_inode(struct inode *inode)
+ }
+
+ init_idisk_time(inode);
++
++ /* Need all the flag bits */
++ f2fs_init_read_extent_tree(inode, node_page);
++
+ f2fs_put_page(node_page, 1);
+
+ stat_inc_inline_xattr(inode);
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 51d0030bddb27..d879a295b688e 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -258,8 +258,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+ }
+ F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+
+- f2fs_init_extent_tree(inode, NULL);
+-
+ F2FS_I(inode)->i_flags =
+ f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
+
+@@ -282,6 +280,8 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+
+ f2fs_set_inode_flags(inode);
+
++ f2fs_init_extent_tree(inode);
++
+ trace_f2fs_new_inode(inode, 0);
+ return inode;
+
+--
+2.39.2
+
--- /dev/null

+From f5c47b6c83cdb9dcc0d68c87f40f38e1431294ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Mar 2023 10:04:26 -0800
+Subject: f2fs: factor out victim_entry usage from general rb_tree use
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 043d2d00b44310f84c0593c63e51fae88c829cdd ]
+
+Let's reduce the complexity of mixed use of rb_tree in victim_entry from
+extent_cache and discard_cmd.
+
+This should fix an arm32 memory alignment issue caused by the shared rb_entry.
+
+[struct victim_entry] [struct rb_entry]
+[0] struct rb_node rb_node; [0] struct rb_node rb_node;
+ union {
+ struct {
+ unsigned int ofs;
+ unsigned int len;
+ };
+[16] unsigned long long mtime; [12] unsigned long long key;
+ } __packed;
+
+Cc: <stable@vger.kernel.org>
+Fixes: 093749e296e2 ("f2fs: support age threshold based garbage collection")
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/extent_cache.c | 36 +----------
+ fs/f2fs/f2fs.h | 15 +----
+ fs/f2fs/gc.c | 139 +++++++++++++++++++++++++----------------
+ fs/f2fs/gc.h | 14 +----
+ fs/f2fs/segment.c | 4 +-
+ 5 files changed, 93 insertions(+), 115 deletions(-)
+
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 794a8134687ae..cb8fb2fdfce2a 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -149,29 +149,6 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ return re;
+ }
+
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+- struct rb_root_cached *root,
+- struct rb_node **parent,
+- unsigned long long key, bool *leftmost)
+-{
+- struct rb_node **p = &root->rb_root.rb_node;
+- struct rb_entry *re;
+-
+- while (*p) {
+- *parent = *p;
+- re = rb_entry(*parent, struct rb_entry, rb_node);
+-
+- if (key < re->key) {
+- p = &(*p)->rb_left;
+- } else {
+- p = &(*p)->rb_right;
+- *leftmost = false;
+- }
+- }
+-
+- return p;
+-}
+-
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+@@ -280,7 +257,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ }
+
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+- struct rb_root_cached *root, bool check_key)
++ struct rb_root_cached *root)
+ {
+ #ifdef CONFIG_F2FS_CHECK_FS
+ struct rb_node *cur = rb_first_cached(root), *next;
+@@ -297,23 +274,12 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ cur_re = rb_entry(cur, struct rb_entry, rb_node);
+ next_re = rb_entry(next, struct rb_entry, rb_node);
+
+- if (check_key) {
+- if (cur_re->key > next_re->key) {
+- f2fs_info(sbi, "inconsistent rbtree, "
+- "cur(%llu) next(%llu)",
+- cur_re->key, next_re->key);
+- return false;
+- }
+- goto next;
+- }
+-
+ if (cur_re->ofs + cur_re->len > next_re->ofs) {
+ f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
+ cur_re->ofs, cur_re->len,
+ next_re->ofs, next_re->len);
+ return false;
+ }
+-next:
+ cur = next;
+ }
+ #endif
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 3fc9d98112166..6fb08b3520a68 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -612,13 +612,8 @@ enum extent_type {
+
+ struct rb_entry {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+- union {
+- struct {
+- unsigned int ofs; /* start offset of the entry */
+- unsigned int len; /* length of the entry */
+- };
+- unsigned long long key; /* 64-bits key */
+- } __packed;
++ unsigned int ofs; /* start offset of the entry */
++ unsigned int len; /* length of the entry */
+ };
+
+ struct extent_info {
+@@ -4132,10 +4127,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
+ */
+ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ struct rb_entry *cached_re, unsigned int ofs);
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+- struct rb_root_cached *root,
+- struct rb_node **parent,
+- unsigned long long key, bool *left_most);
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+@@ -4146,7 +4137,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ struct rb_node ***insert_p, struct rb_node **insert_parent,
+ bool force, bool *leftmost);
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+- struct rb_root_cached *root, bool check_key);
++ struct rb_root_cached *root);
+ void f2fs_init_extent_tree(struct inode *inode);
+ void f2fs_drop_extent_tree(struct inode *inode);
+ void f2fs_destroy_extent_node(struct inode *inode);
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 543de12bf88c2..5cd19fdc10596 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -389,40 +389,95 @@ static unsigned int count_bits(const unsigned long *addr,
+ return sum;
+ }
+
+-static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
+- unsigned long long mtime, unsigned int segno,
+- struct rb_node *parent, struct rb_node **p,
+- bool left_most)
++static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
++ struct rb_root_cached *root)
++{
++#ifdef CONFIG_F2FS_CHECK_FS
++ struct rb_node *cur = rb_first_cached(root), *next;
++ struct victim_entry *cur_ve, *next_ve;
++
++ while (cur) {
++ next = rb_next(cur);
++ if (!next)
++ return true;
++
++ cur_ve = rb_entry(cur, struct victim_entry, rb_node);
++ next_ve = rb_entry(next, struct victim_entry, rb_node);
++
++ if (cur_ve->mtime > next_ve->mtime) {
++ f2fs_info(sbi, "broken victim_rbtree, "
++ "cur_mtime(%llu) next_mtime(%llu)",
++ cur_ve->mtime, next_ve->mtime);
++ return false;
++ }
++ cur = next;
++ }
++#endif
++ return true;
++}
++
++static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
++ unsigned long long mtime)
++{
++ struct atgc_management *am = &sbi->am;
++ struct rb_node *node = am->root.rb_root.rb_node;
++ struct victim_entry *ve = NULL;
++
++ while (node) {
++ ve = rb_entry(node, struct victim_entry, rb_node);
++
++ if (mtime < ve->mtime)
++ node = node->rb_left;
++ else
++ node = node->rb_right;
++ }
++ return ve;
++}
++
++static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
++ unsigned long long mtime, unsigned int segno)
+ {
+ struct atgc_management *am = &sbi->am;
+ struct victim_entry *ve;
+
+- ve = f2fs_kmem_cache_alloc(victim_entry_slab,
+- GFP_NOFS, true, NULL);
++ ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
+
+ ve->mtime = mtime;
+ ve->segno = segno;
+
+- rb_link_node(&ve->rb_node, parent, p);
+- rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
+-
+ list_add_tail(&ve->list, &am->victim_list);
+-
+ am->victim_count++;
+
+ return ve;
+ }
+
+-static void insert_victim_entry(struct f2fs_sb_info *sbi,
++static void __insert_victim_entry(struct f2fs_sb_info *sbi,
+ unsigned long long mtime, unsigned int segno)
+ {
+ struct atgc_management *am = &sbi->am;
+- struct rb_node **p;
++ struct rb_root_cached *root = &am->root;
++ struct rb_node **p = &root->rb_root.rb_node;
+ struct rb_node *parent = NULL;
++ struct victim_entry *ve;
+ bool left_most = true;
+
+- p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
+- attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
++ /* look up rb tree to find parent node */
++ while (*p) {
++ parent = *p;
++ ve = rb_entry(parent, struct victim_entry, rb_node);
++
++ if (mtime < ve->mtime) {
++ p = &(*p)->rb_left;
++ } else {
++ p = &(*p)->rb_right;
++ left_most = false;
++ }
++ }
++
++ ve = __create_victim_entry(sbi, mtime, segno);
++
++ rb_link_node(&ve->rb_node, parent, p);
++ rb_insert_color_cached(&ve->rb_node, root, left_most);
+ }
+
+ static void add_victim_entry(struct f2fs_sb_info *sbi,
+@@ -458,19 +513,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
+ return;
+
+- insert_victim_entry(sbi, mtime, segno);
+-}
+-
+-static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
+- struct victim_sel_policy *p)
+-{
+- struct atgc_management *am = &sbi->am;
+- struct rb_node *parent = NULL;
+- bool left_most;
+-
+- f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
+-
+- return parent;
++ __insert_victim_entry(sbi, mtime, segno);
+ }
+
+ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+@@ -480,7 +523,6 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+ struct atgc_management *am = &sbi->am;
+ struct rb_root_cached *root = &am->root;
+ struct rb_node *node;
+- struct rb_entry *re;
+ struct victim_entry *ve;
+ unsigned long long total_time;
+ unsigned long long age, u, accu;
+@@ -507,12 +549,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+
+ node = rb_first_cached(root);
+ next:
+- re = rb_entry_safe(node, struct rb_entry, rb_node);
+- if (!re)
++ ve = rb_entry_safe(node, struct victim_entry, rb_node);
++ if (!ve)
+ return;
+
+- ve = (struct victim_entry *)re;
+-
+ if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ goto skip;
+
+@@ -554,8 +594,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ {
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct atgc_management *am = &sbi->am;
+- struct rb_node *node;
+- struct rb_entry *re;
+ struct victim_entry *ve;
+ unsigned long long age;
+ unsigned long long max_mtime = sit_i->dirty_max_mtime;
+@@ -565,25 +603,22 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ unsigned int dirty_threshold = max(am->max_candidate_count,
+ am->candidate_ratio *
+ am->victim_count / 100);
+- unsigned int cost;
+- unsigned int iter = 0;
++ unsigned int cost, iter;
+ int stage = 0;
+
+ if (max_mtime < min_mtime)
+ return;
+ max_mtime += 1;
+ next_stage:
+- node = lookup_central_victim(sbi, p);
++ iter = 0;
++ ve = __lookup_victim_entry(sbi, p->age);
+ next_node:
+- re = rb_entry_safe(node, struct rb_entry, rb_node);
+- if (!re) {
+- if (stage == 0)
+- goto skip_stage;
++ if (!ve) {
++ if (stage++ == 0)
++ goto next_stage;
+ return;
+ }
+
+- ve = (struct victim_entry *)re;
+-
+ if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ goto skip_node;
+
+@@ -609,24 +644,20 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ }
+ skip_node:
+ if (iter < dirty_threshold) {
+- if (stage == 0)
+- node = rb_prev(node);
+- else if (stage == 1)
+- node = rb_next(node);
++ ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
++ rb_next(&ve->rb_node),
++ struct victim_entry, rb_node);
+ goto next_node;
+ }
+-skip_stage:
+- if (stage < 1) {
+- stage++;
+- iter = 0;
++
++ if (stage++ == 0)
+ goto next_stage;
+- }
+ }
++
+ static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
+ struct victim_sel_policy *p)
+ {
+- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+- &sbi->am.root, true));
++ f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
+
+ if (p->gc_mode == GC_AT)
+ atgc_lookup_victim(sbi, p);
+diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
+index 19b956c2d697a..ca84024b9c9e7 100644
+--- a/fs/f2fs/gc.h
++++ b/fs/f2fs/gc.h
+@@ -55,20 +55,10 @@ struct gc_inode_list {
+ struct radix_tree_root iroot;
+ };
+
+-struct victim_info {
+- unsigned long long mtime; /* mtime of section */
+- unsigned int segno; /* section No. */
+-};
+-
+ struct victim_entry {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+- union {
+- struct {
+- unsigned long long mtime; /* mtime of section */
+- unsigned int segno; /* segment No. */
+- };
+- struct victim_info vi; /* victim info */
+- };
++ unsigned long long mtime; /* mtime of section */
++ unsigned int segno; /* segment No. */
+ struct list_head list;
+ };
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 7d5bea9d92641..cbbf95b995414 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1474,7 +1474,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
+ goto next;
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+- &dcc->root, false));
++ &dcc->root));
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+@@ -3002,7 +3002,7 @@ static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+ mutex_lock(&dcc->cmd_lock);
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+- &dcc->root, false));
++ &dcc->root));
+
+ dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ NULL, start,
+--
+2.39.2
+
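The offsets in the layout table above can be reproduced in a stand-alone sketch; the code below uses a three-word stand-in for struct rb_node so the numbers match the 32-bit arm case, and all of the type names are invented for illustration. It shows why the old code was fragile: code that viewed a victim_entry node through the packed rb_entry type looked for a 64-bit key at offset 12, while mtime actually sat, naturally aligned, at offset 16.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* stand-in for struct rb_node: three pointers, i.e. 12 bytes on 32-bit arm */
    struct fake_rb_node {
            uint32_t word[3];
    };

    /* old shared view: the packed union puts the 64-bit key at offset 12 */
    struct fake_rb_entry {
            struct fake_rb_node rb_node;
            union {
                    struct {
                            uint32_t ofs;
                            uint32_t len;
                    };
                    uint64_t key;
            } __attribute__((packed));
    };

    /* old ATGC view: mtime keeps its natural 8-byte alignment, so offset 16 */
    struct fake_victim_entry {
            struct fake_rb_node rb_node;
            uint64_t mtime;
            uint32_t segno;
    };

    int main(void)
    {
            printf("rb_entry.key       at offset %zu\n",
                   offsetof(struct fake_rb_entry, key));           /* 12 */
            printf("victim_entry.mtime at offset %zu\n",
                   offsetof(struct fake_victim_entry, mtime));     /* 16 */
            return 0;
    }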
--- /dev/null
+From c58b27fa5e149eabf8db3ef279b0260851284293 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 09:44:58 -0800
+Subject: f2fs: move internal functions into extent_cache.c
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 3bac20a8f011b8ed4012b43f4f33010432b3c647 ]
+
+No functional change.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 043d2d00b443 ("f2fs: factor out victim_entry usage from general rb_tree use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/extent_cache.c | 88 +++++++++++++++++++++++++++++++++++++-----
+ fs/f2fs/f2fs.h | 69 +--------------------------------
+ 2 files changed, 81 insertions(+), 76 deletions(-)
+
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 84078eda19ff1..a626ce0b70a50 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -15,6 +15,77 @@
+ #include "node.h"
+ #include <trace/events/f2fs.h>
+
++static void __set_extent_info(struct extent_info *ei,
++ unsigned int fofs, unsigned int len,
++ block_t blk, bool keep_clen)
++{
++ ei->fofs = fofs;
++ ei->blk = blk;
++ ei->len = len;
++
++ if (keep_clen)
++ return;
++
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ ei->c_len = 0;
++#endif
++}
++
++static bool f2fs_may_extent_tree(struct inode *inode)
++{
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++
++ /*
++ * for recovered files during mount do not create extents
++ * if shrinker is not registered.
++ */
++ if (list_empty(&sbi->s_list))
++ return false;
++
++ if (!test_opt(sbi, READ_EXTENT_CACHE) ||
++ is_inode_flag_set(inode, FI_NO_EXTENT) ||
++ (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++ !f2fs_sb_has_readonly(sbi)))
++ return false;
++
++ return S_ISREG(inode->i_mode);
++}
++
++static void __try_update_largest_extent(struct extent_tree *et,
++ struct extent_node *en)
++{
++ if (en->ei.len <= et->largest.len)
++ return;
++
++ et->largest = en->ei;
++ et->largest_updated = true;
++}
++
++static bool __is_extent_mergeable(struct extent_info *back,
++ struct extent_info *front)
++{
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (back->c_len && back->len != back->c_len)
++ return false;
++ if (front->c_len && front->len != front->c_len)
++ return false;
++#endif
++ return (back->fofs + back->len == front->fofs &&
++ back->blk + back->len == front->blk);
++}
++
++static bool __is_back_mergeable(struct extent_info *cur,
++ struct extent_info *back)
++{
++ return __is_extent_mergeable(back, cur);
++}
++
++static bool __is_front_mergeable(struct extent_info *cur,
++ struct extent_info *front)
++{
++ return __is_extent_mergeable(cur, front);
++}
++
+ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+ unsigned int ofs)
+ {
+@@ -592,16 +663,16 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+
+ if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
+ if (parts) {
+- set_extent_info(&ei, end,
+- end - dei.fofs + dei.blk,
+- org_end - end);
++ __set_extent_info(&ei,
++ end, org_end - end,
++ end - dei.fofs + dei.blk, false);
+ en1 = __insert_extent_tree(sbi, et, &ei,
+ NULL, NULL, true);
+ next_en = en1;
+ } else {
+- en->ei.fofs = end;
+- en->ei.blk += end - dei.fofs;
+- en->ei.len -= end - dei.fofs;
++ __set_extent_info(&en->ei,
++ end, en->ei.len - (end - dei.fofs),
++ en->ei.blk + (end - dei.fofs), true);
+ next_en = en;
+ }
+ parts++;
+@@ -633,8 +704,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+
+ /* 3. update extent in extent cache */
+ if (blkaddr) {
+-
+- set_extent_info(&ei, fofs, blkaddr, len);
++ __set_extent_info(&ei, fofs, len, blkaddr, false);
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ __insert_extent_tree(sbi, et, &ei,
+ insert_p, insert_parent, leftmost);
+@@ -693,7 +763,7 @@ void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+ if (en)
+ goto unlock_out;
+
+- set_extent_info(&ei, fofs, blkaddr, llen);
++ __set_extent_info(&ei, fofs, llen, blkaddr, true);
+ ei.c_len = c_len;
+
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index f2d1be26d0d05..076bdf27df547 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -618,7 +618,7 @@ struct rb_entry {
+ struct extent_info {
+ unsigned int fofs; /* start offset in a file */
+ unsigned int len; /* length of the extent */
+- u32 blk; /* start block address of the extent */
++ block_t blk; /* start block address of the extent */
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ unsigned int c_len; /* physical extent length of compressed blocks */
+ #endif
+@@ -842,17 +842,6 @@ static inline void set_raw_read_extent(struct extent_info *ext,
+ i_ext->len = cpu_to_le32(ext->len);
+ }
+
+-static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
+- u32 blk, unsigned int len)
+-{
+- ei->fofs = fofs;
+- ei->blk = blk;
+- ei->len = len;
+-#ifdef CONFIG_F2FS_FS_COMPRESSION
+- ei->c_len = 0;
+-#endif
+-}
+-
+ static inline bool __is_discard_mergeable(struct discard_info *back,
+ struct discard_info *front, unsigned int max_len)
+ {
+@@ -872,41 +861,6 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
+ return __is_discard_mergeable(cur, front, max_len);
+ }
+
+-static inline bool __is_extent_mergeable(struct extent_info *back,
+- struct extent_info *front)
+-{
+-#ifdef CONFIG_F2FS_FS_COMPRESSION
+- if (back->c_len && back->len != back->c_len)
+- return false;
+- if (front->c_len && front->len != front->c_len)
+- return false;
+-#endif
+- return (back->fofs + back->len == front->fofs &&
+- back->blk + back->len == front->blk);
+-}
+-
+-static inline bool __is_back_mergeable(struct extent_info *cur,
+- struct extent_info *back)
+-{
+- return __is_extent_mergeable(back, cur);
+-}
+-
+-static inline bool __is_front_mergeable(struct extent_info *cur,
+- struct extent_info *front)
+-{
+- return __is_extent_mergeable(cur, front);
+-}
+-
+-extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
+-static inline void __try_update_largest_extent(struct extent_tree *et,
+- struct extent_node *en)
+-{
+- if (en->ei.len > et->largest.len) {
+- et->largest = en->ei;
+- et->largest_updated = true;
+- }
+-}
+-
+ /*
+ * For free nid management
+ */
+@@ -2578,6 +2532,7 @@ static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
+ return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
+ }
+
++extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
+ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, bool is_inode)
+ {
+@@ -4400,26 +4355,6 @@ F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
+ F2FS_FEATURE_FUNCS(compression, COMPRESSION);
+ F2FS_FEATURE_FUNCS(readonly, RO);
+
+-static inline bool f2fs_may_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, READ_EXTENT_CACHE) ||
+- is_inode_flag_set(inode, FI_NO_EXTENT) ||
+- (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+- !f2fs_sb_has_readonly(sbi)))
+- return false;
+-
+- /*
+- * for recovered files during mount do not create extents
+- * if shrinker is not registered.
+- */
+- if (list_empty(&sbi->s_list))
+- return false;
+-
+- return S_ISREG(inode->i_mode);
+-}
+-
+ #ifdef CONFIG_BLK_DEV_ZONED
+ static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+ block_t blkaddr)
+--
+2.39.2
+
--- /dev/null
+From 697fdb785ae0403a30ee660646681421a40a2e85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 09:26:29 -0800
+Subject: f2fs: refactor extent_cache to support for read and more
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit e7547daccd6a37522f0af74ec4b5a3036f3dd328 ]
+
+This patch prepares extent_cache to be ready for addition.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 043d2d00b443 ("f2fs: factor out victim_entry usage from general rb_tree use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/data.c | 20 +-
+ fs/f2fs/debug.c | 65 +++--
+ fs/f2fs/extent_cache.c | 463 +++++++++++++++++++++---------------
+ fs/f2fs/f2fs.h | 119 +++++----
+ fs/f2fs/file.c | 8 +-
+ fs/f2fs/gc.c | 4 +-
+ fs/f2fs/inode.c | 6 +-
+ fs/f2fs/node.c | 8 +-
+ fs/f2fs/segment.c | 3 +-
+ fs/f2fs/shrinker.c | 19 +-
+ include/trace/events/f2fs.h | 62 +++--
+ 11 files changed, 470 insertions(+), 307 deletions(-)
+
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 770a606eb3f6a..de6b056f090b3 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1134,7 +1134,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+ dn->data_blkaddr = blkaddr;
+ f2fs_set_data_blkaddr(dn);
+- f2fs_update_extent_cache(dn);
++ f2fs_update_read_extent_cache(dn);
+ }
+
+ /* dn->ofs_in_node will be returned with up-to-date last block pointer */
+@@ -1203,7 +1203,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
+ struct extent_info ei = {0, };
+ struct inode *inode = dn->inode;
+
+- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ dn->data_blkaddr = ei.blk + index - ei.fofs;
+ return 0;
+ }
+@@ -1224,7 +1224,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE_READ)) {
+@@ -1486,7 +1486,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ pgofs = (pgoff_t)map->m_lblk;
+ end = pgofs + maxblocks;
+
+- if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
++ if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
+ if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
+ map->m_may_create)
+ goto next_dnode;
+@@ -1696,7 +1696,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+- f2fs_update_extent_cache_range(&dn,
++ f2fs_update_read_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+@@ -1741,7 +1741,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+- f2fs_update_extent_cache_range(&dn,
++ f2fs_update_read_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+@@ -2202,7 +2202,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ if (f2fs_cluster_is_empty(cc))
+ goto out;
+
+- if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
++ if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
+ from_dnode = false;
+
+ if (!from_dnode)
+@@ -2636,7 +2636,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+
+ if (need_inplace_update(fio) &&
+- f2fs_lookup_extent_cache(inode, page->index, &ei)) {
++ f2fs_lookup_read_extent_cache(inode, page->index, &ei)) {
+ fio->old_blkaddr = ei.blk + page->index - ei.fofs;
+
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+@@ -3361,7 +3361,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ } else if (locked) {
+ err = f2fs_get_block(&dn, index);
+ } else {
+- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+@@ -3402,7 +3402,7 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
+
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index a216dcdf69418..a9baa121d829f 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -72,15 +72,23 @@ static void update_general_status(struct f2fs_sb_info *sbi)
+ si->main_area_zones = si->main_area_sections /
+ le32_to_cpu(raw_super->secs_per_zone);
+
+- /* validation check of the segment numbers */
++ /* general extent cache stats */
++ for (i = 0; i < NR_EXTENT_CACHES; i++) {
++ struct extent_tree_info *eti = &sbi->extent_tree[i];
++
++ si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]);
++ si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]);
++ si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]);
++ si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i];
++ si->ext_tree[i] = atomic_read(&eti->total_ext_tree);
++ si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree);
++ si->ext_node[i] = atomic_read(&eti->total_ext_node);
++ }
++ /* read extent_cache only */
+ si->hit_largest = atomic64_read(&sbi->read_hit_largest);
+- si->hit_cached = atomic64_read(&sbi->read_hit_cached);
+- si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
+- si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+- si->total_ext = atomic64_read(&sbi->total_hit_ext);
+- si->ext_tree = atomic_read(&sbi->total_ext_tree);
+- si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
+- si->ext_node = atomic_read(&sbi->total_ext_node);
++ si->hit_total[EX_READ] += si->hit_largest;
++
++ /* validation check of the segment numbers */
+ si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
+ si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
+ si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+@@ -294,10 +302,16 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
+ sizeof(struct nat_entry_set);
+ for (i = 0; i < MAX_INO_ENTRY; i++)
+ si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+- si->cache_mem += atomic_read(&sbi->total_ext_tree) *
++
++ for (i = 0; i < NR_EXTENT_CACHES; i++) {
++ struct extent_tree_info *eti = &sbi->extent_tree[i];
++
++ si->ext_mem[i] = atomic_read(&eti->total_ext_tree) *
+ sizeof(struct extent_tree);
+- si->cache_mem += atomic_read(&sbi->total_ext_node) *
++ si->ext_mem[i] += atomic_read(&eti->total_ext_node) *
+ sizeof(struct extent_node);
++ si->cache_mem += si->ext_mem[i];
++ }
+
+ si->page_mem = 0;
+ if (sbi->node_inode) {
+@@ -490,16 +504,18 @@ static int stat_show(struct seq_file *s, void *v)
+ si->bg_node_blks);
+ seq_printf(s, "BG skip : IO: %u, Other: %u\n",
+ si->io_skip_bggc, si->other_skip_bggc);
+- seq_puts(s, "\nExtent Cache:\n");
++ seq_puts(s, "\nExtent Cache (Read):\n");
+ seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
+- si->hit_largest, si->hit_cached,
+- si->hit_rbtree);
++ si->hit_largest, si->hit_cached[EX_READ],
++ si->hit_rbtree[EX_READ]);
+ seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
+- !si->total_ext ? 0 :
+- div64_u64(si->hit_total * 100, si->total_ext),
+- si->hit_total, si->total_ext);
++ !si->total_ext[EX_READ] ? 0 :
++ div64_u64(si->hit_total[EX_READ] * 100,
++ si->total_ext[EX_READ]),
++ si->hit_total[EX_READ], si->total_ext[EX_READ]);
+ seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
+- si->ext_tree, si->zombie_tree, si->ext_node);
++ si->ext_tree[EX_READ], si->zombie_tree[EX_READ],
++ si->ext_node[EX_READ]);
+ seq_puts(s, "\nBalancing F2FS Async:\n");
+ seq_printf(s, " - DIO (R: %4d, W: %4d)\n",
+ si->nr_dio_read, si->nr_dio_write);
+@@ -566,8 +582,10 @@ static int stat_show(struct seq_file *s, void *v)
+ (si->base_mem + si->cache_mem + si->page_mem) >> 10);
+ seq_printf(s, " - static: %llu KB\n",
+ si->base_mem >> 10);
+- seq_printf(s, " - cached: %llu KB\n",
++ seq_printf(s, " - cached all: %llu KB\n",
+ si->cache_mem >> 10);
++ seq_printf(s, " - read extent cache: %llu KB\n",
++ si->ext_mem[EX_READ] >> 10);
+ seq_printf(s, " - paged : %llu KB\n",
+ si->page_mem >> 10);
+ }
+@@ -600,10 +618,15 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
+ si->sbi = sbi;
+ sbi->stat_info = si;
+
+- atomic64_set(&sbi->total_hit_ext, 0);
+- atomic64_set(&sbi->read_hit_rbtree, 0);
++ /* general extent cache stats */
++ for (i = 0; i < NR_EXTENT_CACHES; i++) {
++ atomic64_set(&sbi->total_hit_ext[i], 0);
++ atomic64_set(&sbi->read_hit_rbtree[i], 0);
++ atomic64_set(&sbi->read_hit_cached[i], 0);
++ }
++
++ /* read extent_cache only */
+ atomic64_set(&sbi->read_hit_largest, 0);
+- atomic64_set(&sbi->read_hit_cached, 0);
+
+ atomic_set(&sbi->inline_xattr, 0);
+ atomic_set(&sbi->inline_inode, 0);
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index d3c3b1b627c63..4217076df1024 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -17,21 +17,37 @@
+
+ static void __set_extent_info(struct extent_info *ei,
+ unsigned int fofs, unsigned int len,
+- block_t blk, bool keep_clen)
++ block_t blk, bool keep_clen,
++ enum extent_type type)
+ {
+ ei->fofs = fofs;
+- ei->blk = blk;
+ ei->len = len;
+
+- if (keep_clen)
+- return;
+-
++ if (type == EX_READ) {
++ ei->blk = blk;
++ if (keep_clen)
++ return;
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+- ei->c_len = 0;
++ ei->c_len = 0;
+ #endif
++ }
++}
++
++static bool __may_read_extent_tree(struct inode *inode)
++{
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++
++ if (!test_opt(sbi, READ_EXTENT_CACHE))
++ return false;
++ if (is_inode_flag_set(inode, FI_NO_EXTENT))
++ return false;
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++ !f2fs_sb_has_readonly(sbi))
++ return false;
++ return S_ISREG(inode->i_mode);
+ }
+
+-static bool f2fs_may_extent_tree(struct inode *inode)
++static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+@@ -42,18 +58,16 @@ static bool f2fs_may_extent_tree(struct inode *inode)
+ if (list_empty(&sbi->s_list))
+ return false;
+
+- if (!test_opt(sbi, READ_EXTENT_CACHE) ||
+- is_inode_flag_set(inode, FI_NO_EXTENT) ||
+- (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+- !f2fs_sb_has_readonly(sbi)))
+- return false;
+-
+- return S_ISREG(inode->i_mode);
++ if (type == EX_READ)
++ return __may_read_extent_tree(inode);
++ return false;
+ }
+
+ static void __try_update_largest_extent(struct extent_tree *et,
+ struct extent_node *en)
+ {
++ if (et->type != EX_READ)
++ return;
+ if (en->ei.len <= et->largest.len)
+ return;
+
+@@ -62,28 +76,31 @@ static void __try_update_largest_extent(struct extent_tree *et,
+ }
+
+ static bool __is_extent_mergeable(struct extent_info *back,
+- struct extent_info *front)
++ struct extent_info *front, enum extent_type type)
+ {
++ if (type == EX_READ) {
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+- if (back->c_len && back->len != back->c_len)
+- return false;
+- if (front->c_len && front->len != front->c_len)
+- return false;
++ if (back->c_len && back->len != back->c_len)
++ return false;
++ if (front->c_len && front->len != front->c_len)
++ return false;
+ #endif
+- return (back->fofs + back->len == front->fofs &&
+- back->blk + back->len == front->blk);
++ return (back->fofs + back->len == front->fofs &&
++ back->blk + back->len == front->blk);
++ }
++ return false;
+ }
+
+ static bool __is_back_mergeable(struct extent_info *cur,
+- struct extent_info *back)
++ struct extent_info *back, enum extent_type type)
+ {
+- return __is_extent_mergeable(back, cur);
++ return __is_extent_mergeable(back, cur, type);
+ }
+
+ static bool __is_front_mergeable(struct extent_info *cur,
+- struct extent_info *front)
++ struct extent_info *front, enum extent_type type)
+ {
+- return __is_extent_mergeable(cur, front);
++ return __is_extent_mergeable(cur, front, type);
+ }
+
+ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+@@ -308,6 +325,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+ struct rb_node *parent, struct rb_node **p,
+ bool leftmost)
+ {
++ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ struct extent_node *en;
+
+ en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
+@@ -321,16 +339,18 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+ rb_link_node(&en->rb_node, parent, p);
+ rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
+ atomic_inc(&et->node_cnt);
+- atomic_inc(&sbi->total_ext_node);
++ atomic_inc(&eti->total_ext_node);
+ return en;
+ }
+
+ static void __detach_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+ {
++ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
++
+ rb_erase_cached(&en->rb_node, &et->root);
+ atomic_dec(&et->node_cnt);
+- atomic_dec(&sbi->total_ext_node);
++ atomic_dec(&eti->total_ext_node);
+
+ if (et->cached_en == en)
+ et->cached_en = NULL;
+@@ -346,42 +366,47 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
+ static void __release_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+ {
+- spin_lock(&sbi->extent_lock);
++ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
++
++ spin_lock(&eti->extent_lock);
+ f2fs_bug_on(sbi, list_empty(&en->list));
+ list_del_init(&en->list);
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+
+ __detach_extent_node(sbi, et, en);
+ }
+
+-static struct extent_tree *__grab_extent_tree(struct inode *inode)
++static struct extent_tree *__grab_extent_tree(struct inode *inode,
++ enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
+ struct extent_tree *et;
+ nid_t ino = inode->i_ino;
+
+- mutex_lock(&sbi->extent_tree_lock);
+- et = radix_tree_lookup(&sbi->extent_tree_root, ino);
++ mutex_lock(&eti->extent_tree_lock);
++ et = radix_tree_lookup(&eti->extent_tree_root, ino);
+ if (!et) {
+ et = f2fs_kmem_cache_alloc(extent_tree_slab,
+ GFP_NOFS, true, NULL);
+- f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
++ f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
+ memset(et, 0, sizeof(struct extent_tree));
+ et->ino = ino;
++ et->type = type;
+ et->root = RB_ROOT_CACHED;
+ et->cached_en = NULL;
+ rwlock_init(&et->lock);
+ INIT_LIST_HEAD(&et->list);
+ atomic_set(&et->node_cnt, 0);
+- atomic_inc(&sbi->total_ext_tree);
++ atomic_inc(&eti->total_ext_tree);
+ } else {
+- atomic_dec(&sbi->total_zombie_tree);
++ atomic_dec(&eti->total_zombie_tree);
+ list_del_init(&et->list);
+ }
+- mutex_unlock(&sbi->extent_tree_lock);
++ mutex_unlock(&eti->extent_tree_lock);
+
+ /* never died until evict_inode */
+- F2FS_I(inode)->extent_tree = et;
++ F2FS_I(inode)->extent_tree[type] = et;
+
+ return et;
+ }
+@@ -415,35 +440,38 @@ static void __drop_largest_extent(struct extent_tree *et,
+ }
+
+ /* return true, if inode page is changed */
+-static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
++static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage,
++ enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
+ struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
+ struct extent_tree *et;
+ struct extent_node *en;
+ struct extent_info ei;
+
+- if (!f2fs_may_extent_tree(inode)) {
+- /* drop largest extent */
+- if (i_ext && i_ext->len) {
++ if (!__may_extent_tree(inode, type)) {
++ /* drop largest read extent */
++ if (type == EX_READ && i_ext && i_ext->len) {
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ i_ext->len = 0;
+ set_page_dirty(ipage);
+- return;
+ }
+- return;
++ goto out;
+ }
+
+- et = __grab_extent_tree(inode);
++ et = __grab_extent_tree(inode, type);
+
+ if (!i_ext || !i_ext->len)
+- return;
++ goto out;
++
++ BUG_ON(type != EX_READ);
+
+ get_read_extent_info(&ei, i_ext);
+
+ write_lock(&et->lock);
+ if (atomic_read(&et->node_cnt))
+- goto out;
++ goto unlock_out;
+
+ en = __attach_extent_node(sbi, et, &ei, NULL,
+ &et->root.rb_root.rb_node, true);
+@@ -451,38 +479,41 @@ static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+ et->largest = en->ei;
+ et->cached_en = en;
+
+- spin_lock(&sbi->extent_lock);
+- list_add_tail(&en->list, &sbi->extent_list);
+- spin_unlock(&sbi->extent_lock);
++ spin_lock(&eti->extent_lock);
++ list_add_tail(&en->list, &eti->extent_list);
++ spin_unlock(&eti->extent_lock);
+ }
+-out:
++unlock_out:
+ write_unlock(&et->lock);
++out:
++ if (type == EX_READ && !F2FS_I(inode)->extent_tree[EX_READ])
++ set_inode_flag(inode, FI_NO_EXTENT);
+ }
+
+ void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+ {
+- __f2fs_init_extent_tree(inode, ipage);
+-
+- if (!F2FS_I(inode)->extent_tree)
+- set_inode_flag(inode, FI_NO_EXTENT);
++ /* initialize read cache */
++ __f2fs_init_extent_tree(inode, ipage, EX_READ);
+ }
+
+-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+- struct extent_info *ei)
++static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
++ struct extent_info *ei, enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ struct extent_node *en;
+ bool ret = false;
+
+ if (!et)
+ return false;
+
+- trace_f2fs_lookup_extent_tree_start(inode, pgofs);
++ trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
+
+ read_lock(&et->lock);
+
+- if (et->largest.fofs <= pgofs &&
++ if (type == EX_READ &&
++ et->largest.fofs <= pgofs &&
+ et->largest.fofs + et->largest.len > pgofs) {
+ *ei = et->largest;
+ ret = true;
+@@ -496,23 +527,24 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+ goto out;
+
+ if (en == et->cached_en)
+- stat_inc_cached_node_hit(sbi);
++ stat_inc_cached_node_hit(sbi, type);
+ else
+- stat_inc_rbtree_node_hit(sbi);
++ stat_inc_rbtree_node_hit(sbi, type);
+
+ *ei = en->ei;
+- spin_lock(&sbi->extent_lock);
++ spin_lock(&eti->extent_lock);
+ if (!list_empty(&en->list)) {
+- list_move_tail(&en->list, &sbi->extent_list);
++ list_move_tail(&en->list, &eti->extent_list);
+ et->cached_en = en;
+ }
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+ ret = true;
+ out:
+- stat_inc_total_hit(sbi);
++ stat_inc_total_hit(sbi, type);
+ read_unlock(&et->lock);
+
+- trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
++ if (type == EX_READ)
++ trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
+ return ret;
+ }
+
+@@ -521,18 +553,20 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_node *prev_ex,
+ struct extent_node *next_ex)
+ {
++ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ struct extent_node *en = NULL;
+
+- if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
++ if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
+ prev_ex->ei.len += ei->len;
+ ei = &prev_ex->ei;
+ en = prev_ex;
+ }
+
+- if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
++ if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
+ next_ex->ei.fofs = ei->fofs;
+- next_ex->ei.blk = ei->blk;
+ next_ex->ei.len += ei->len;
++ if (et->type == EX_READ)
++ next_ex->ei.blk = ei->blk;
+ if (en)
+ __release_extent_node(sbi, et, prev_ex);
+
+@@ -544,12 +578,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+
+ __try_update_largest_extent(et, en);
+
+- spin_lock(&sbi->extent_lock);
++ spin_lock(&eti->extent_lock);
+ if (!list_empty(&en->list)) {
+- list_move_tail(&en->list, &sbi->extent_list);
++ list_move_tail(&en->list, &eti->extent_list);
+ et->cached_en = en;
+ }
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+ return en;
+ }
+
+@@ -559,6 +593,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+ struct rb_node *insert_parent,
+ bool leftmost)
+ {
++ struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct extent_node *en = NULL;
+@@ -581,47 +616,50 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+ __try_update_largest_extent(et, en);
+
+ /* update in global extent list */
+- spin_lock(&sbi->extent_lock);
+- list_add_tail(&en->list, &sbi->extent_list);
++ spin_lock(&eti->extent_lock);
++ list_add_tail(&en->list, &eti->extent_list);
+ et->cached_en = en;
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+ return en;
+ }
+
+-static void f2fs_update_extent_tree_range(struct inode *inode,
+- pgoff_t fofs, block_t blkaddr, unsigned int len)
++static void __update_extent_tree_range(struct inode *inode,
++ struct extent_info *tei, enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ struct extent_node *en = NULL, *en1 = NULL;
+ struct extent_node *prev_en = NULL, *next_en = NULL;
+ struct extent_info ei, dei, prev;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
++ unsigned int fofs = tei->fofs, len = tei->len;
+ unsigned int end = fofs + len;
+- unsigned int pos = (unsigned int)fofs;
+ bool updated = false;
+ bool leftmost = false;
+
+ if (!et)
+ return;
+
+- trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len, 0);
+-
++ if (type == EX_READ)
++ trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
++ tei->blk, 0);
+ write_lock(&et->lock);
+
+- if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+- write_unlock(&et->lock);
+- return;
+- }
++ if (type == EX_READ) {
++ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
++ write_unlock(&et->lock);
++ return;
++ }
+
+- prev = et->largest;
+- dei.len = 0;
++ prev = et->largest;
++ dei.len = 0;
+
+- /*
+- * drop largest extent before lookup, in case it's already
+- * been shrunk from extent tree
+- */
+- __drop_largest_extent(et, fofs, len);
++ /*
++ * drop largest extent before lookup, in case it's already
++ * been shrunk from extent tree
++ */
++ __drop_largest_extent(et, fofs, len);
++ }
+
+ /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+ en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+@@ -642,26 +680,30 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+
+ dei = en->ei;
+ org_end = dei.fofs + dei.len;
+- f2fs_bug_on(sbi, pos >= org_end);
++ f2fs_bug_on(sbi, fofs >= org_end);
+
+- if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+- en->ei.len = pos - en->ei.fofs;
++ if (fofs > dei.fofs && (type != EX_READ ||
++ fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
++ en->ei.len = fofs - en->ei.fofs;
+ prev_en = en;
+ parts = 1;
+ }
+
+- if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
++ if (end < org_end && (type != EX_READ ||
++ org_end - end >= F2FS_MIN_EXTENT_LEN)) {
+ if (parts) {
+ __set_extent_info(&ei,
+ end, org_end - end,
+- end - dei.fofs + dei.blk, false);
++ end - dei.fofs + dei.blk, false,
++ type);
+ en1 = __insert_extent_tree(sbi, et, &ei,
+ NULL, NULL, true);
+ next_en = en1;
+ } else {
+ __set_extent_info(&en->ei,
+ end, en->ei.len - (end - dei.fofs),
+- en->ei.blk + (end - dei.fofs), true);
++ en->ei.blk + (end - dei.fofs), true,
++ type);
+ next_en = en;
+ }
+ parts++;
+@@ -691,9 +733,11 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+ en = next_en;
+ }
+
+- /* 3. update extent in extent cache */
+- if (blkaddr) {
+- __set_extent_info(&ei, fofs, len, blkaddr, false);
++ /* 3. update extent in read extent cache */
++ BUG_ON(type != EX_READ);
++
++ if (tei->blk) {
++ __set_extent_info(&ei, fofs, len, tei->blk, false, EX_READ);
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ __insert_extent_tree(sbi, et, &ei,
+ insert_p, insert_parent, leftmost);
+@@ -723,19 +767,20 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+ }
+
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
++void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+ pgoff_t fofs, block_t blkaddr, unsigned int llen,
+ unsigned int c_len)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+ struct extent_node *en = NULL;
+ struct extent_node *prev_en = NULL, *next_en = NULL;
+ struct extent_info ei;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ bool leftmost = false;
+
+- trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen, c_len);
++ trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
++ blkaddr, c_len);
+
+ /* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+@@ -752,7 +797,7 @@ void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+ if (en)
+ goto unlock_out;
+
+- __set_extent_info(&ei, fofs, llen, blkaddr, true);
++ __set_extent_info(&ei, fofs, llen, blkaddr, true, EX_READ);
+ ei.c_len = c_len;
+
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+@@ -763,24 +808,43 @@ void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+ }
+ #endif
+
+-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
++static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
+ {
++ struct extent_info ei;
++
++ if (!__may_extent_tree(dn->inode, type))
++ return;
++
++ ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
++ dn->ofs_in_node;
++ ei.len = 1;
++
++ if (type == EX_READ) {
++ if (dn->data_blkaddr == NEW_ADDR)
++ ei.blk = NULL_ADDR;
++ else
++ ei.blk = dn->data_blkaddr;
++ }
++ __update_extent_tree_range(dn->inode, &ei, type);
++}
++
++static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
++ enum extent_type type)
++{
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
+ struct extent_tree *et, *next;
+ struct extent_node *en;
+ unsigned int node_cnt = 0, tree_cnt = 0;
+ int remained;
+
+- if (!test_opt(sbi, READ_EXTENT_CACHE))
+- return 0;
+-
+- if (!atomic_read(&sbi->total_zombie_tree))
++ if (!atomic_read(&eti->total_zombie_tree))
+ goto free_node;
+
+- if (!mutex_trylock(&sbi->extent_tree_lock))
++ if (!mutex_trylock(&eti->extent_tree_lock))
+ goto out;
+
+ /* 1. remove unreferenced extent tree */
+- list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
++ list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
+ if (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et);
+@@ -788,61 +852,100 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+ }
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+ list_del_init(&et->list);
+- radix_tree_delete(&sbi->extent_tree_root, et->ino);
++ radix_tree_delete(&eti->extent_tree_root, et->ino);
+ kmem_cache_free(extent_tree_slab, et);
+- atomic_dec(&sbi->total_ext_tree);
+- atomic_dec(&sbi->total_zombie_tree);
++ atomic_dec(&eti->total_ext_tree);
++ atomic_dec(&eti->total_zombie_tree);
+ tree_cnt++;
+
+ if (node_cnt + tree_cnt >= nr_shrink)
+ goto unlock_out;
+ cond_resched();
+ }
+- mutex_unlock(&sbi->extent_tree_lock);
++ mutex_unlock(&eti->extent_tree_lock);
+
+ free_node:
+ /* 2. remove LRU extent entries */
+- if (!mutex_trylock(&sbi->extent_tree_lock))
++ if (!mutex_trylock(&eti->extent_tree_lock))
+ goto out;
+
+ remained = nr_shrink - (node_cnt + tree_cnt);
+
+- spin_lock(&sbi->extent_lock);
++ spin_lock(&eti->extent_lock);
+ for (; remained > 0; remained--) {
+- if (list_empty(&sbi->extent_list))
++ if (list_empty(&eti->extent_list))
+ break;
+- en = list_first_entry(&sbi->extent_list,
++ en = list_first_entry(&eti->extent_list,
+ struct extent_node, list);
+ et = en->et;
+ if (!write_trylock(&et->lock)) {
+ /* refresh this extent node's position in extent list */
+- list_move_tail(&en->list, &sbi->extent_list);
++ list_move_tail(&en->list, &eti->extent_list);
+ continue;
+ }
+
+ list_del_init(&en->list);
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+
+ __detach_extent_node(sbi, et, en);
+
+ write_unlock(&et->lock);
+ node_cnt++;
+- spin_lock(&sbi->extent_lock);
++ spin_lock(&eti->extent_lock);
+ }
+- spin_unlock(&sbi->extent_lock);
++ spin_unlock(&eti->extent_lock);
+
+ unlock_out:
+- mutex_unlock(&sbi->extent_tree_lock);
++ mutex_unlock(&eti->extent_tree_lock);
+ out:
+- trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
++ trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
+
+ return node_cnt + tree_cnt;
+ }
+
+-unsigned int f2fs_destroy_extent_node(struct inode *inode)
++/* read extent cache operations */
++bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
++ struct extent_info *ei)
++{
++ if (!__may_extent_tree(inode, EX_READ))
++ return false;
++
++ return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
++}
++
++void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
++{
++ return __update_extent_cache(dn, EX_READ);
++}
++
++void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
++ pgoff_t fofs, block_t blkaddr, unsigned int len)
++{
++ struct extent_info ei = {
++ .fofs = fofs,
++ .len = len,
++ .blk = blkaddr,
++ };
++
++ if (!__may_extent_tree(dn->inode, EX_READ))
++ return;
++
++ __update_extent_tree_range(dn->inode, &ei, EX_READ);
++}
++
++unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
++{
++ if (!test_opt(sbi, READ_EXTENT_CACHE))
++ return 0;
++
++ return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
++}
++
++static unsigned int __destroy_extent_node(struct inode *inode,
++ enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ unsigned int node_cnt = 0;
+
+ if (!et || !atomic_read(&et->node_cnt))
+@@ -855,31 +958,44 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
+ return node_cnt;
+ }
+
+-void f2fs_drop_extent_tree(struct inode *inode)
++void f2fs_destroy_extent_node(struct inode *inode)
++{
++ __destroy_extent_node(inode, EX_READ);
++}
++
++static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ bool updated = false;
+
+- if (!f2fs_may_extent_tree(inode))
++ if (!__may_extent_tree(inode, type))
+ return;
+
+ write_lock(&et->lock);
+- set_inode_flag(inode, FI_NO_EXTENT);
+ __free_extent_tree(sbi, et);
+- if (et->largest.len) {
+- et->largest.len = 0;
+- updated = true;
++ if (type == EX_READ) {
++ set_inode_flag(inode, FI_NO_EXTENT);
++ if (et->largest.len) {
++ et->largest.len = 0;
++ updated = true;
++ }
+ }
+ write_unlock(&et->lock);
+ if (updated)
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+
+-void f2fs_destroy_extent_tree(struct inode *inode)
++void f2fs_drop_extent_tree(struct inode *inode)
++{
++ __drop_extent_tree(inode, EX_READ);
++}
++
++static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ unsigned int node_cnt = 0;
+
+ if (!et)
+@@ -887,76 +1003,49 @@ void f2fs_destroy_extent_tree(struct inode *inode)
+
+ if (inode->i_nlink && !is_bad_inode(inode) &&
+ atomic_read(&et->node_cnt)) {
+- mutex_lock(&sbi->extent_tree_lock);
+- list_add_tail(&et->list, &sbi->zombie_list);
+- atomic_inc(&sbi->total_zombie_tree);
+- mutex_unlock(&sbi->extent_tree_lock);
++ mutex_lock(&eti->extent_tree_lock);
++ list_add_tail(&et->list, &eti->zombie_list);
++ atomic_inc(&eti->total_zombie_tree);
++ mutex_unlock(&eti->extent_tree_lock);
+ return;
+ }
+
+ /* free all extent info belong to this extent tree */
+- node_cnt = f2fs_destroy_extent_node(inode);
++ node_cnt = __destroy_extent_node(inode, type);
+
+ /* delete extent tree entry in radix tree */
+- mutex_lock(&sbi->extent_tree_lock);
++ mutex_lock(&eti->extent_tree_lock);
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+- radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
++ radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
+ kmem_cache_free(extent_tree_slab, et);
+- atomic_dec(&sbi->total_ext_tree);
+- mutex_unlock(&sbi->extent_tree_lock);
++ atomic_dec(&eti->total_ext_tree);
++ mutex_unlock(&eti->extent_tree_lock);
+
+- F2FS_I(inode)->extent_tree = NULL;
++ F2FS_I(inode)->extent_tree[type] = NULL;
+
+- trace_f2fs_destroy_extent_tree(inode, node_cnt);
++ trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
+ }
+
+-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+- struct extent_info *ei)
+-{
+- if (!f2fs_may_extent_tree(inode))
+- return false;
+-
+- return f2fs_lookup_extent_tree(inode, pgofs, ei);
+-}
+-
+-void f2fs_update_extent_cache(struct dnode_of_data *dn)
++void f2fs_destroy_extent_tree(struct inode *inode)
+ {
+- pgoff_t fofs;
+- block_t blkaddr;
+-
+- if (!f2fs_may_extent_tree(dn->inode))
+- return;
+-
+- if (dn->data_blkaddr == NEW_ADDR)
+- blkaddr = NULL_ADDR;
+- else
+- blkaddr = dn->data_blkaddr;
+-
+- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+- dn->ofs_in_node;
+- f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
++ __destroy_extent_tree(inode, EX_READ);
+ }
+
+-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+- pgoff_t fofs, block_t blkaddr, unsigned int len)
+-
++static void __init_extent_tree_info(struct extent_tree_info *eti)
+ {
+- if (!f2fs_may_extent_tree(dn->inode))
+- return;
+-
+- f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
++ INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
++ mutex_init(&eti->extent_tree_lock);
++ INIT_LIST_HEAD(&eti->extent_list);
++ spin_lock_init(&eti->extent_lock);
++ atomic_set(&eti->total_ext_tree, 0);
++ INIT_LIST_HEAD(&eti->zombie_list);
++ atomic_set(&eti->total_zombie_tree, 0);
++ atomic_set(&eti->total_ext_node, 0);
+ }
+
+ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
+ {
+- INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+- mutex_init(&sbi->extent_tree_lock);
+- INIT_LIST_HEAD(&sbi->extent_list);
+- spin_lock_init(&sbi->extent_lock);
+- atomic_set(&sbi->total_ext_tree, 0);
+- INIT_LIST_HEAD(&sbi->zombie_list);
+- atomic_set(&sbi->total_zombie_tree, 0);
+- atomic_set(&sbi->total_ext_node, 0);
++ __init_extent_tree_info(&sbi->extent_tree[EX_READ]);
+ }
+
+ int __init f2fs_create_extent_cache(void)
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 076bdf27df547..cf45af3a44a7a 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -593,16 +593,22 @@ enum {
+ /* dirty segments threshold for triggering CP */
+ #define DEFAULT_DIRTY_THRESHOLD 4
+
++#define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
++#define RECOVERY_MIN_RA_BLOCKS 1
++
++#define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */
++
+ /* for in-memory extent cache entry */
+ #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
+
+ /* number of extent info in extent cache we try to shrink */
+ #define READ_EXTENT_CACHE_SHRINK_NUMBER 128
+
+-#define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
+-#define RECOVERY_MIN_RA_BLOCKS 1
+-
+-#define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */
++/* extent cache type */
++enum extent_type {
++ EX_READ,
++ NR_EXTENT_CACHES,
++};
+
+ struct rb_entry {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+@@ -618,10 +624,17 @@ struct rb_entry {
+ struct extent_info {
+ unsigned int fofs; /* start offset in a file */
+ unsigned int len; /* length of the extent */
+- block_t blk; /* start block address of the extent */
++ union {
++ /* read extent_cache */
++ struct {
++ /* start block address of the extent */
++ block_t blk;
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+- unsigned int c_len; /* physical extent length of compressed blocks */
++ /* physical extent length of compressed blocks */
++ unsigned int c_len;
+ #endif
++ };
++ };
+ };
+
+ struct extent_node {
+@@ -633,13 +646,25 @@ struct extent_node {
+
+ struct extent_tree {
+ nid_t ino; /* inode number */
++ enum extent_type type; /* keep the extent tree type */
+ struct rb_root_cached root; /* root of extent info rb-tree */
+ struct extent_node *cached_en; /* recently accessed extent node */
+- struct extent_info largest; /* largested extent info */
+ struct list_head list; /* to be used by sbi->zombie_list */
+ rwlock_t lock; /* protect extent info rb-tree */
+ atomic_t node_cnt; /* # of extent node in rb-tree*/
+ bool largest_updated; /* largest extent updated */
++ struct extent_info largest; /* largest cached extent for EX_READ */
++};
++
++struct extent_tree_info {
++ struct radix_tree_root extent_tree_root;/* cache extent cache entries */
++ struct mutex extent_tree_lock; /* locking extent radix tree */
++ struct list_head extent_list; /* lru list for shrinker */
++ spinlock_t extent_lock; /* locking extent lru list */
++ atomic_t total_ext_tree; /* extent tree count */
++ struct list_head zombie_list; /* extent zombie tree list */
++ atomic_t total_zombie_tree; /* extent zombie tree count */
++ atomic_t total_ext_node; /* extent info count */
+ };
+
+ /*
+@@ -801,7 +826,8 @@ struct f2fs_inode_info {
+ struct list_head dirty_list; /* dirty list for dirs and files */
+ struct list_head gdirty_list; /* linked in global dirty list */
+ struct task_struct *atomic_write_task; /* store atomic write task */
+- struct extent_tree *extent_tree; /* cached extent_tree entry */
++ struct extent_tree *extent_tree[NR_EXTENT_CACHES];
++ /* cached extent_tree entry */
+ struct inode *cow_inode; /* copy-on-write inode for atomic write */
+
+ /* avoid racing between foreground op and gc */
+@@ -1624,14 +1650,7 @@ struct f2fs_sb_info {
+ struct mutex flush_lock; /* for flush exclusion */
+
+ /* for extent tree cache */
+- struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+- struct mutex extent_tree_lock; /* locking extent radix tree */
+- struct list_head extent_list; /* lru list for shrinker */
+- spinlock_t extent_lock; /* locking extent lru list */
+- atomic_t total_ext_tree; /* extent tree count */
+- struct list_head zombie_list; /* extent zombie tree list */
+- atomic_t total_zombie_tree; /* extent zombie tree count */
+- atomic_t total_ext_node; /* extent info count */
++ struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
+
+ /* basic filesystem units */
+ unsigned int log_sectors_per_block; /* log2 sectors per block */
+@@ -1715,10 +1734,14 @@ struct f2fs_sb_info {
+ unsigned int segment_count[2]; /* # of allocated segments */
+ unsigned int block_count[2]; /* # of allocated blocks */
+ atomic_t inplace_count; /* # of inplace update */
+- atomic64_t total_hit_ext; /* # of lookup extent cache */
+- atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */
+- atomic64_t read_hit_largest; /* # of hit largest extent node */
+- atomic64_t read_hit_cached; /* # of hit cached extent node */
++ /* # of lookup extent cache */
++ atomic64_t total_hit_ext[NR_EXTENT_CACHES];
++ /* # of hit rbtree extent node */
++ atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
++ /* # of hit cached extent node */
++ atomic64_t read_hit_cached[NR_EXTENT_CACHES];
++ /* # of hit largest extent node in read extent cache */
++ atomic64_t read_hit_largest;
+ atomic_t inline_xattr; /* # of inline_xattr inodes */
+ atomic_t inline_inode; /* # of inline_data inodes */
+ atomic_t inline_dir; /* # of inline_dentry inodes */
+@@ -3820,9 +3843,17 @@ struct f2fs_stat_info {
+ struct f2fs_sb_info *sbi;
+ int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
+ int main_area_segs, main_area_sections, main_area_zones;
+- unsigned long long hit_largest, hit_cached, hit_rbtree;
+- unsigned long long hit_total, total_ext;
+- int ext_tree, zombie_tree, ext_node;
++ unsigned long long hit_cached[NR_EXTENT_CACHES];
++ unsigned long long hit_rbtree[NR_EXTENT_CACHES];
++ unsigned long long total_ext[NR_EXTENT_CACHES];
++ unsigned long long hit_total[NR_EXTENT_CACHES];
++ int ext_tree[NR_EXTENT_CACHES];
++ int zombie_tree[NR_EXTENT_CACHES];
++ int ext_node[NR_EXTENT_CACHES];
++ /* to count memory footprint */
++ unsigned long long ext_mem[NR_EXTENT_CACHES];
++ /* for read extent cache */
++ unsigned long long hit_largest;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
+ int ndirty_data, ndirty_qdata;
+ unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+@@ -3881,10 +3912,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+ #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
+ #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
+ #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
+-#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
+-#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
++#define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
++#define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
+ #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
+-#define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached))
++#define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
+ #define stat_inc_inline_xattr(inode) \
+ do { \
+ if (f2fs_has_inline_xattr(inode)) \
+@@ -4007,10 +4038,10 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
+ #define stat_other_skip_bggc_count(sbi) do { } while (0)
+ #define stat_inc_dirty_inode(sbi, type) do { } while (0)
+ #define stat_dec_dirty_inode(sbi, type) do { } while (0)
+-#define stat_inc_total_hit(sbi) do { } while (0)
+-#define stat_inc_rbtree_node_hit(sbi) do { } while (0)
++#define stat_inc_total_hit(sbi, type) do { } while (0)
++#define stat_inc_rbtree_node_hit(sbi, type) do { } while (0)
+ #define stat_inc_largest_node_hit(sbi) do { } while (0)
+-#define stat_inc_cached_node_hit(sbi) do { } while (0)
++#define stat_inc_cached_node_hit(sbi, type) do { } while (0)
+ #define stat_inc_inline_xattr(inode) do { } while (0)
+ #define stat_dec_inline_xattr(inode) do { } while (0)
+ #define stat_inc_inline_inode(inode) do { } while (0)
+@@ -4116,20 +4147,23 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ bool force, bool *leftmost);
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root_cached *root, bool check_key);
+-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
+ void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
+ void f2fs_drop_extent_tree(struct inode *inode);
+-unsigned int f2fs_destroy_extent_node(struct inode *inode);
++void f2fs_destroy_extent_node(struct inode *inode);
+ void f2fs_destroy_extent_tree(struct inode *inode);
+-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+- struct extent_info *ei);
+-void f2fs_update_extent_cache(struct dnode_of_data *dn);
+-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+- pgoff_t fofs, block_t blkaddr, unsigned int len);
+ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
+ int __init f2fs_create_extent_cache(void);
+ void f2fs_destroy_extent_cache(void);
+
++/* read extent cache ops */
++bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
++ struct extent_info *ei);
++void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
++void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
++ pgoff_t fofs, block_t blkaddr, unsigned int len);
++unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
++ int nr_shrink);
++
+ /*
+ * sysfs.c
+ */
+@@ -4199,9 +4233,9 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
+ struct writeback_control *wbc,
+ enum iostat_type io_type);
+ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+- pgoff_t fofs, block_t blkaddr, unsigned int llen,
+- unsigned int c_len);
++void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
++ pgoff_t fofs, block_t blkaddr,
++ unsigned int llen, unsigned int c_len);
+ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ unsigned nr_pages, sector_t *last_block_in_bio,
+ bool is_readahead, bool for_write);
+@@ -4282,9 +4316,10 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
+ static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
+ nid_t ino) { }
+ #define inc_compr_inode_stat(inode) do { } while (0)
+-static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+- pgoff_t fofs, block_t blkaddr, unsigned int llen,
+- unsigned int c_len) { }
++static inline void f2fs_update_read_extent_tree_range_compressed(
++ struct inode *inode,
++ pgoff_t fofs, block_t blkaddr,
++ unsigned int llen, unsigned int c_len) { }
+ #endif
+
+ static inline int set_compress_context(struct inode *inode)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index bf37983304a33..dbad2db68f1bc 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -618,7 +618,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ */
+ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ dn->inode) + ofs;
+- f2fs_update_extent_cache_range(dn, fofs, 0, len);
++ f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+ dec_valid_block_count(sbi, dn->inode, nr_free);
+ }
+ dn->ofs_in_node = ofs;
+@@ -1496,7 +1496,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ f2fs_set_data_blkaddr(dn);
+ }
+
+- f2fs_update_extent_cache_range(dn, start, 0, index - start);
++ f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
+
+ return ret;
+ }
+@@ -2573,7 +2573,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ struct f2fs_map_blocks map = { .m_next_extent = NULL,
+ .m_seg_type = NO_CHECK_TYPE,
+ .m_may_create = false };
+- struct extent_info ei = {0, 0, 0};
++ struct extent_info ei = {0, };
+ pgoff_t pg_start, pg_end, next_pgofs;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ unsigned int total = 0, sec_num;
+@@ -2605,7 +2605,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ * lookup mapping info in extent cache, skip defragmenting if physical
+ * block addresses are continuous.
+ */
+- if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
+ if (ei.fofs + ei.len >= pg_end)
+ goto out;
+ }
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index aa928d1c81597..543de12bf88c2 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1147,7 +1147,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+- struct extent_info ei = {0, 0, 0};
++ struct extent_info ei = {0, };
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = inode->i_ino,
+@@ -1165,7 +1165,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ if (!page)
+ return -ENOMEM;
+
+- if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++ if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE_READ))) {
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 896b0d5e4ee3f..7bfe29626024d 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -262,8 +262,8 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ return false;
+ }
+
+- if (fi->extent_tree) {
+- struct extent_info *ei = &fi->extent_tree->largest;
++ if (fi->extent_tree[EX_READ]) {
++ struct extent_info *ei = &fi->extent_tree[EX_READ]->largest;
+
+ if (ei->len &&
+ (!f2fs_is_valid_blkaddr(sbi, ei->blk,
+@@ -607,7 +607,7 @@ struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ {
+ struct f2fs_inode *ri;
+- struct extent_tree *et = F2FS_I(inode)->extent_tree;
++ struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+
+ f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+ set_page_dirty(node_page);
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 84b147966080e..07419c3e42a52 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -86,9 +86,11 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
+ mem_size >>= PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == READ_EXTENT_CACHE) {
+- mem_size = (atomic_read(&sbi->total_ext_tree) *
++ struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
++
++ mem_size = (atomic_read(&eti->total_ext_tree) *
+ sizeof(struct extent_tree) +
+- atomic_read(&sbi->total_ext_node) *
++ atomic_read(&eti->total_ext_node) *
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ } else if (type == DISCARD_CACHE) {
+@@ -859,7 +861,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + 1);
+
+- f2fs_update_extent_tree_range_compressed(dn->inode,
++ f2fs_update_read_extent_tree_range_compressed(dn->inode,
+ index, blkaddr,
+ F2FS_I(dn->inode)->i_cluster_size,
+ c_len);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 426559092930d..7d5bea9d92641 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -453,7 +453,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
+
+ /* try to shrink extent cache when there is no enough memory */
+ if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
+- f2fs_shrink_extent_tree(sbi, READ_EXTENT_CACHE_SHRINK_NUMBER);
++ f2fs_shrink_read_extent_tree(sbi,
++ READ_EXTENT_CACHE_SHRINK_NUMBER);
+
+ /* check the # of cached NAT entries */
+ if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
+diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
+index dd3c3c7a90ec8..33c490e69ae30 100644
+--- a/fs/f2fs/shrinker.c
++++ b/fs/f2fs/shrinker.c
+@@ -28,10 +28,13 @@ static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+ return count > 0 ? count : 0;
+ }
+
+-static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
++static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
++ enum extent_type type)
+ {
+- return atomic_read(&sbi->total_zombie_tree) +
+- atomic_read(&sbi->total_ext_node);
++ struct extent_tree_info *eti = &sbi->extent_tree[type];
++
++ return atomic_read(&eti->total_zombie_tree) +
++ atomic_read(&eti->total_ext_node);
+ }
+
+ unsigned long f2fs_shrink_count(struct shrinker *shrink,
+@@ -53,8 +56,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
+ }
+ spin_unlock(&f2fs_list_lock);
+
+- /* count extent cache entries */
+- count += __count_extent_cache(sbi);
++ /* count read extent cache entries */
++ count += __count_extent_cache(sbi, EX_READ);
+
+ /* count clean nat cache entries */
+ count += __count_nat_entries(sbi);
+@@ -99,8 +102,8 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+
+ sbi->shrinker_run_no = run_no;
+
+- /* shrink extent cache entries */
+- freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
++ /* shrink read extent cache entries */
++ freed += f2fs_shrink_read_extent_tree(sbi, nr >> 1);
+
+ /* shrink clean nat cache entries */
+ if (freed < nr)
+@@ -130,7 +133,7 @@ void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+
+ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+ {
+- f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
++ f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
+
+ spin_lock(&f2fs_list_lock);
+ list_del_init(&sbi->s_list);
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index eb53e96b7a29c..5f58684f6107a 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -48,6 +48,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
+ TRACE_DEFINE_ENUM(CP_TRIMMED);
+ TRACE_DEFINE_ENUM(CP_PAUSE);
+ TRACE_DEFINE_ENUM(CP_RESIZE);
++TRACE_DEFINE_ENUM(EX_READ);
+
+ #define show_block_type(type) \
+ __print_symbolic(type, \
+@@ -1559,28 +1560,31 @@ TRACE_EVENT(f2fs_issue_flush,
+
+ TRACE_EVENT(f2fs_lookup_extent_tree_start,
+
+- TP_PROTO(struct inode *inode, unsigned int pgofs),
++ TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type),
+
+- TP_ARGS(inode, pgofs),
++ TP_ARGS(inode, pgofs, type),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
++ __field(enum extent_type, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
++ __entry->type = type;
+ ),
+
+- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
++ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s",
+ show_dev_ino(__entry),
+- __entry->pgofs)
++ __entry->pgofs,
++ __entry->type == EX_READ ? "Read" : "N/A")
+ );
+
+-TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
++TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs,
+ struct extent_info *ei),
+@@ -1594,8 +1598,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, fofs)
+- __field(u32, blk)
+ __field(unsigned int, len)
++ __field(u32, blk)
+ ),
+
+ TP_fast_assign(
+@@ -1603,26 +1607,26 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->fofs = ei->fofs;
+- __entry->blk = ei->blk;
+ __entry->len = ei->len;
++ __entry->blk = ei->blk;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+- "ext_info(fofs: %u, blk: %u, len: %u)",
++ "read_ext_info(fofs: %u, len: %u, blk: %u)",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->fofs,
+- __entry->blk,
+- __entry->len)
++ __entry->len,
++ __entry->blk)
+ );
+
+-TRACE_EVENT(f2fs_update_extent_tree_range,
++TRACE_EVENT(f2fs_update_read_extent_tree_range,
+
+- TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
+- unsigned int len,
++ TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len,
++ block_t blkaddr,
+ unsigned int c_len),
+
+- TP_ARGS(inode, pgofs, blkaddr, len, c_len),
++ TP_ARGS(inode, pgofs, len, blkaddr, c_len),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+@@ -1637,67 +1641,73 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+- __entry->blk = blkaddr;
+ __entry->len = len;
++ __entry->blk = blkaddr;
+ __entry->c_len = c_len;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+- "blkaddr = %u, len = %u, "
+- "c_len = %u",
++ "len = %u, blkaddr = %u, c_len = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+- __entry->blk,
+ __entry->len,
++ __entry->blk,
+ __entry->c_len)
+ );
+
+ TRACE_EVENT(f2fs_shrink_extent_tree,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+- unsigned int tree_cnt),
++ unsigned int tree_cnt, enum extent_type type),
+
+- TP_ARGS(sbi, node_cnt, tree_cnt),
++ TP_ARGS(sbi, node_cnt, tree_cnt, type),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, node_cnt)
+ __field(unsigned int, tree_cnt)
++ __field(enum extent_type, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->node_cnt = node_cnt;
+ __entry->tree_cnt = tree_cnt;
++ __entry->type = type;
+ ),
+
+- TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
++ TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s",
+ show_dev(__entry->dev),
+ __entry->node_cnt,
+- __entry->tree_cnt)
++ __entry->tree_cnt,
++ __entry->type == EX_READ ? "Read" : "N/A")
+ );
+
+ TRACE_EVENT(f2fs_destroy_extent_tree,
+
+- TP_PROTO(struct inode *inode, unsigned int node_cnt),
++ TP_PROTO(struct inode *inode, unsigned int node_cnt,
++ enum extent_type type),
+
+- TP_ARGS(inode, node_cnt),
++ TP_ARGS(inode, node_cnt, type),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, node_cnt)
++ __field(enum extent_type, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->node_cnt = node_cnt;
++ __entry->type = type;
+ ),
+
+- TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
++ TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s",
+ show_dev_ino(__entry),
+- __entry->node_cnt)
++ __entry->node_cnt,
++ __entry->type == EX_READ ? "Read" : "N/A")
+ );
+
+ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+--
+2.39.2
+
--- /dev/null
+From 5d8457891790ed21910e9432f61e733061dbcb88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 10:01:18 -0800
+Subject: f2fs: remove unnecessary __init_extent_tree
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 749d543c0d451fff31e8f7a3e0a031ffcbf1ebb1 ]
+
+Fold __init_extent_tree() into its caller.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 043d2d00b443 ("f2fs: factor out victim_entry usage from general rb_tree use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/extent_cache.c | 21 +++++----------------
+ 1 file changed, 5 insertions(+), 16 deletions(-)
+
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index a626ce0b70a50..d3c3b1b627c63 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -386,21 +386,6 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
+ return et;
+ }
+
+-static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+- struct extent_tree *et, struct extent_info *ei)
+-{
+- struct rb_node **p = &et->root.rb_root.rb_node;
+- struct extent_node *en;
+-
+- en = __attach_extent_node(sbi, et, ei, NULL, p, true);
+- if (!en)
+- return NULL;
+-
+- et->largest = en->ei;
+- et->cached_en = en;
+- return en;
+-}
+-
+ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+ struct extent_tree *et)
+ {
+@@ -460,8 +445,12 @@ static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+ if (atomic_read(&et->node_cnt))
+ goto out;
+
+- en = __init_extent_tree(sbi, et, &ei);
++ en = __attach_extent_node(sbi, et, &ei, NULL,
++ &et->root.rb_root.rb_node, true);
+ if (en) {
++ et->largest = en->ei;
++ et->cached_en = en;
++
+ spin_lock(&sbi->extent_lock);
+ list_add_tail(&en->list, &sbi->extent_list);
+ spin_unlock(&sbi->extent_lock);
+--
+2.39.2
+
--- /dev/null
+From a39db627c2a1a67031548d469d5e66adb764cb99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 09:36:43 -0800
+Subject: f2fs: specify extent cache for read explicitly
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 12607c1ba7637e750402f555b6695c50fce77a2b ]
+
+Let's describe it as the read extent cache.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 043d2d00b443 ("f2fs: factor out victim_entry usage from general rb_tree use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/extent_cache.c | 4 ++--
+ fs/f2fs/f2fs.h | 10 +++++-----
+ fs/f2fs/inode.c | 2 +-
+ fs/f2fs/node.c | 2 +-
+ fs/f2fs/node.h | 2 +-
+ fs/f2fs/segment.c | 4 ++--
+ fs/f2fs/super.c | 12 ++++++------
+ 7 files changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 6c9e6f78a3e37..84078eda19ff1 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -383,7 +383,7 @@ static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
+ if (!i_ext || !i_ext->len)
+ return;
+
+- get_extent_info(&ei, i_ext);
++ get_read_extent_info(&ei, i_ext);
+
+ write_lock(&et->lock);
+ if (atomic_read(&et->node_cnt))
+@@ -711,7 +711,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+ unsigned int node_cnt = 0, tree_cnt = 0;
+ int remained;
+
+- if (!test_opt(sbi, EXTENT_CACHE))
++ if (!test_opt(sbi, READ_EXTENT_CACHE))
+ return 0;
+
+ if (!atomic_read(&sbi->total_zombie_tree))
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 4b44ca1decdd3..f2d1be26d0d05 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -91,7 +91,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
+ #define F2FS_MOUNT_FLUSH_MERGE 0x00000400
+ #define F2FS_MOUNT_NOBARRIER 0x00000800
+ #define F2FS_MOUNT_FASTBOOT 0x00001000
+-#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
++#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00002000
+ #define F2FS_MOUNT_DATA_FLUSH 0x00008000
+ #define F2FS_MOUNT_FAULT_INJECTION 0x00010000
+ #define F2FS_MOUNT_USRQUOTA 0x00080000
+@@ -597,7 +597,7 @@ enum {
+ #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
+
+ /* number of extent info in extent cache we try to shrink */
+-#define EXTENT_CACHE_SHRINK_NUMBER 128
++#define READ_EXTENT_CACHE_SHRINK_NUMBER 128
+
+ #define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
+ #define RECOVERY_MIN_RA_BLOCKS 1
+@@ -826,7 +826,7 @@ struct f2fs_inode_info {
+ loff_t original_i_size; /* original i_size before atomic write */
+ };
+
+-static inline void get_extent_info(struct extent_info *ext,
++static inline void get_read_extent_info(struct extent_info *ext,
+ struct f2fs_extent *i_ext)
+ {
+ ext->fofs = le32_to_cpu(i_ext->fofs);
+@@ -834,7 +834,7 @@ static inline void get_extent_info(struct extent_info *ext,
+ ext->len = le32_to_cpu(i_ext->len);
+ }
+
+-static inline void set_raw_extent(struct extent_info *ext,
++static inline void set_raw_read_extent(struct extent_info *ext,
+ struct f2fs_extent *i_ext)
+ {
+ i_ext->fofs = cpu_to_le32(ext->fofs);
+@@ -4404,7 +4404,7 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+- if (!test_opt(sbi, EXTENT_CACHE) ||
++ if (!test_opt(sbi, READ_EXTENT_CACHE) ||
+ is_inode_flag_set(inode, FI_NO_EXTENT) ||
+ (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+ !f2fs_sb_has_readonly(sbi)))
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 229ddc2f7b079..896b0d5e4ee3f 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -629,7 +629,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+
+ if (et) {
+ read_lock(&et->lock);
+- set_raw_extent(&et->largest, &ri->i_ext);
++ set_raw_read_extent(&et->largest, &ri->i_ext);
+ read_unlock(&et->lock);
+ } else {
+ memset(&ri->i_ext, 0, sizeof(ri->i_ext));
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index b9ee5a1176a07..84b147966080e 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -85,7 +85,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
+ sizeof(struct ino_entry);
+ mem_size >>= PAGE_SHIFT;
+ res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+- } else if (type == EXTENT_CACHE) {
++ } else if (type == READ_EXTENT_CACHE) {
+ mem_size = (atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree) +
+ atomic_read(&sbi->total_ext_node) *
+diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
+index 3c09cae058b0a..0aa48704c77a0 100644
+--- a/fs/f2fs/node.h
++++ b/fs/f2fs/node.h
+@@ -146,7 +146,7 @@ enum mem_type {
+ NAT_ENTRIES, /* indicates the cached nat entry */
+ DIRTY_DENTS, /* indicates dirty dentry pages */
+ INO_ENTRIES, /* indicates inode entries */
+- EXTENT_CACHE, /* indicates extent cache */
++ READ_EXTENT_CACHE, /* indicates read extent cache */
+ DISCARD_CACHE, /* indicates memory of cached discard cmds */
+ COMPRESS_PAGE, /* indicates memory of cached compressed pages */
+ BASE_CHECK, /* check kernel status */
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 866335db78793..426559092930d 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -452,8 +452,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
+ return;
+
+ /* try to shrink extent cache when there is no enough memory */
+- if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
+- f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
++ if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
++ f2fs_shrink_extent_tree(sbi, READ_EXTENT_CACHE_SHRINK_NUMBER);
+
+ /* check the # of cached NAT entries */
+ if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 5af05411818a5..c46533d65372c 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -810,10 +810,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ set_opt(sbi, FASTBOOT);
+ break;
+ case Opt_extent_cache:
+- set_opt(sbi, EXTENT_CACHE);
++ set_opt(sbi, READ_EXTENT_CACHE);
+ break;
+ case Opt_noextent_cache:
+- clear_opt(sbi, EXTENT_CACHE);
++ clear_opt(sbi, READ_EXTENT_CACHE);
+ break;
+ case Opt_noinline_data:
+ clear_opt(sbi, INLINE_DATA);
+@@ -1939,7 +1939,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ seq_puts(seq, ",nobarrier");
+ if (test_opt(sbi, FASTBOOT))
+ seq_puts(seq, ",fastboot");
+- if (test_opt(sbi, EXTENT_CACHE))
++ if (test_opt(sbi, READ_EXTENT_CACHE))
+ seq_puts(seq, ",extent_cache");
+ else
+ seq_puts(seq, ",noextent_cache");
+@@ -2057,7 +2057,7 @@ static void default_options(struct f2fs_sb_info *sbi)
+ set_opt(sbi, INLINE_XATTR);
+ set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
+- set_opt(sbi, EXTENT_CACHE);
++ set_opt(sbi, READ_EXTENT_CACHE);
+ set_opt(sbi, NOHEAP);
+ clear_opt(sbi, DISABLE_CHECKPOINT);
+ set_opt(sbi, MERGE_CHECKPOINT);
+@@ -2198,7 +2198,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ bool need_restart_ckpt = false, need_stop_ckpt = false;
+ bool need_restart_flush = false, need_stop_flush = false;
+ bool need_restart_discard = false, need_stop_discard = false;
+- bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
++ bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
+ bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
+ bool no_io_align = !F2FS_IO_ALIGNED(sbi);
+ bool no_atgc = !test_opt(sbi, ATGC);
+@@ -2288,7 +2288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ }
+
+ /* disallow enable/disable extent_cache dynamically */
+- if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
++ if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switch extent_cache option is not allowed");
+ goto restore_opts;
+--
+2.39.2
+
--- /dev/null
+From 771859319eaf24b195eaa416978c08b7480a8bec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Oct 2022 22:25:14 +0800
+Subject: irqchip/loongarch: Adjust acpi_cascade_irqdomain_init() and
+ sub-routines
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+[ Upstream commit 3d12938dbc048ecb193fec69898d95f6b4813a4b ]
+
+1. Adjust the return value of acpi_cascade_irqdomain_init() and check
+   it at the call sites.
+2. Combine unnecessarily short lines into single longer lines.
+
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20221020142514.1725514-1-chenhuacai@loongson.cn
+Stable-dep-of: 64cc451e45e1 ("irqchip/loongson-eiointc: Fix incorrect use of acpi_get_vec_parent")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-loongarch-cpu.c | 30 +++++++++++++++-----------
+ drivers/irqchip/irq-loongson-eiointc.c | 30 +++++++++++++++-----------
+ drivers/irqchip/irq-loongson-pch-pic.c | 15 +++++++------
+ 3 files changed, 44 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
+index 741612ba6a520..fdec3e9cfacfb 100644
+--- a/drivers/irqchip/irq-loongarch-cpu.c
++++ b/drivers/irqchip/irq-loongarch-cpu.c
+@@ -92,18 +92,16 @@ static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ };
+
+-static int __init
+-liointc_parse_madt(union acpi_subtable_headers *header,
+- const unsigned long end)
++static int __init liointc_parse_madt(union acpi_subtable_headers *header,
++ const unsigned long end)
+ {
+ struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
+
+ return liointc_acpi_init(irq_domain, liointc_entry);
+ }
+
+-static int __init
+-eiointc_parse_madt(union acpi_subtable_headers *header,
+- const unsigned long end)
++static int __init eiointc_parse_madt(union acpi_subtable_headers *header,
++ const unsigned long end)
+ {
+ struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
+
+@@ -112,16 +110,24 @@ eiointc_parse_madt(union acpi_subtable_headers *header,
+
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+- acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
+- liointc_parse_madt, 0);
+- acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+- eiointc_parse_madt, 0);
++ int r;
++
++ r = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, liointc_parse_madt, 0);
++ if (r < 0)
++ return r;
++
++ r = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eiointc_parse_madt, 0);
++ if (r < 0)
++ return r;
++
+ return 0;
+ }
+
+ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+ {
++ int ret;
++
+ if (irq_domain)
+ return 0;
+
+@@ -139,9 +145,9 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ set_handle_irq(&handle_cpu_irq);
+ acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
+ acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
+- acpi_cascade_irqdomain_init();
++ ret = acpi_cascade_irqdomain_init();
+
+- return 0;
++ return ret;
+ }
+
+ IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index ab49cd15699f4..7e601a5f7d9f8 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -301,9 +301,8 @@ static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group
+ return NULL;
+ }
+
+-static int __init
+-pch_pic_parse_madt(union acpi_subtable_headers *header,
+- const unsigned long end)
++static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
++ const unsigned long end)
+ {
+ struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
+ unsigned int node = (pchpic_entry->address >> 44) & 0xf;
+@@ -315,9 +314,8 @@ pch_pic_parse_madt(union acpi_subtable_headers *header,
+ return 0;
+ }
+
+-static int __init
+-pch_msi_parse_madt(union acpi_subtable_headers *header,
+- const unsigned long end)
++static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
++ const unsigned long end)
+ {
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+ struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
+@@ -330,17 +328,23 @@ pch_msi_parse_madt(union acpi_subtable_headers *header,
+
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+- acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
+- pch_pic_parse_madt, 0);
+- acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
+- pch_msi_parse_madt, 1);
++ int r;
++
++ r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
++ if (r < 0)
++ return r;
++
++ r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
++ if (r < 0)
++ return r;
++
+ return 0;
+ }
+
+ int __init eiointc_acpi_init(struct irq_domain *parent,
+ struct acpi_madt_eio_pic *acpi_eiointc)
+ {
+- int i, parent_irq;
++ int i, ret, parent_irq;
+ unsigned long node_map;
+ struct eiointc_priv *priv;
+
+@@ -386,9 +390,9 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+
+ acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
+ acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
+- acpi_cascade_irqdomain_init();
++ ret = acpi_cascade_irqdomain_init();
+
+- return 0;
++ return ret;
+
+ out_free_handle:
+ irq_domain_free_fwnode(priv->domain_handle);
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index 9d2250e277581..679e2b68e6e9d 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -328,9 +328,8 @@ int find_pch_pic(u32 gsi)
+ return -1;
+ }
+
+-static int __init
+-pch_lpc_parse_madt(union acpi_subtable_headers *header,
+- const unsigned long end)
++static int __init pch_lpc_parse_madt(union acpi_subtable_headers *header,
++ const unsigned long end)
+ {
+ struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;
+
+@@ -339,8 +338,12 @@ pch_lpc_parse_madt(union acpi_subtable_headers *header,
+
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+- acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC,
+- pch_lpc_parse_madt, 0);
++ int r;
++
++ r = acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, pch_lpc_parse_madt, 0);
++ if (r < 0)
++ return r;
++
+ return 0;
+ }
+
+@@ -370,7 +373,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ }
+
+ if (acpi_pchpic->id == 0)
+- acpi_cascade_irqdomain_init();
++ ret = acpi_cascade_irqdomain_init();
+
+ return ret;
+ }
+--
+2.39.2
+
--- /dev/null
+From 9cca60308ef4ccd915931535bf570d90f09d41ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Apr 2023 16:34:50 +0800
+Subject: irqchip/loongson-eiointc: Fix incorrect use of acpi_get_vec_parent
+
+From: Jianmin Lv <lvjianmin@loongson.cn>
+
+[ Upstream commit 64cc451e45e146b2140211b4f45f278b93b24ac0 ]
+
+In eiointc_acpi_init(), an *eiointc* node is passed into
+acpi_get_vec_parent() instead of the required *NUMA* node (on some
+chips, such as the 3C5000L, a *NUMA* node equals one *eiointc* node,
+but on others, such as the 3C5000, a *NUMA* node contains four
+*eiointc* nodes). Since the node in struct acpi_vector_group is
+essentially a *NUMA* node, no parent is matched for the passed
+*eiointc* node. So the patch adjusts the code to use a *NUMA* node
+as the node parameter of acpi_set_vec_parent/acpi_get_vec_parent.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jianmin Lv <lvjianmin@loongson.cn>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20230407083453.6305-3-lvjianmin@loongson.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-loongson-eiointc.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 7e601a5f7d9f8..768ed36f5f663 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -279,9 +279,6 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
+ {
+ int i;
+
+- if (cpu_has_flatmode)
+- node = cpu_to_node(node * CORES_PER_EIO_NODE);
+-
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ if (node == vec_group[i].node) {
+ vec_group[i].parent = parent;
+@@ -317,8 +314,16 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
+ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+ {
++ struct irq_domain *parent;
+ struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+- struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
++ int node;
++
++ if (cpu_has_flatmode)
++ node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
++ else
++ node = eiointc_priv[nr_pics - 1]->node;
++
++ parent = acpi_get_vec_parent(node, msi_group);
+
+ if (parent)
+ return pch_msi_acpi_init(parent, pchmsi_entry);
+@@ -347,6 +352,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ int i, ret, parent_irq;
+ unsigned long node_map;
+ struct eiointc_priv *priv;
++ int node;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+@@ -388,8 +394,12 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ "irqchip/loongarch/intc:starting",
+ eiointc_router_init, NULL);
+
+- acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
+- acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
++ if (cpu_has_flatmode)
++ node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
++ else
++ node = acpi_eiointc->node;
++ acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
++ acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+ ret = acpi_cascade_irqdomain_init();
+
+ return ret;
+--
+2.39.2
+
--- /dev/null
+From b5b08440de49dabb4265a8af930d4b625afc23ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Apr 2023 16:34:51 +0800
+Subject: irqchip/loongson-eiointc: Fix registration of syscore_ops
+
+From: Jianmin Lv <lvjianmin@loongson.cn>
+
+[ Upstream commit bdd60211eebb43ba1c4c14704965f4d4b628b931 ]
+
+When suspend/resume support was added for loongson-eiointc, the
+syscore_ops was registered twice on dual-bridge machines, which have
+two eiointc IRQ domains. Repeated registration of the same syscore_ops
+corrupts syscore_ops_list. Likewise, cpuhp_setup_state_nocalls() only
+needs to be called once. This patch corrects both.
+
+Fixes: a90335c2dfb4 ("irqchip/loongson-eiointc: Add suspend/resume support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jianmin Lv <lvjianmin@loongson.cn>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20230407083453.6305-4-lvjianmin@loongson.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-loongson-eiointc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 768ed36f5f663..ac04aeaa2d308 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -390,9 +390,11 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
++ if (nr_pics == 1) {
++ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ "irqchip/loongarch/intc:starting",
+ eiointc_router_init, NULL);
++ }
+
+ if (cpu_has_flatmode)
+ node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+--
+2.39.2
+
--- /dev/null
+From 8a771efc6a52496320c5e2dc649e38b151c44fc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 08:43:30 +0900
+Subject: ksmbd: block asynchronous requests when making a delay on session
+ setup
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit b096d97f47326b1e2dbdef1c91fab69ffda54d17 ]
+
+ksmbd imposes a 5 second delay on session setup to mitigate dictionary
+attacks, but the delay can be bypassed by using asynchronous requests.
+This patch blocks all requests on the current connection while the
+delay is applied on session setup failure.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20482
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/smb2pdu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index d3f33194faf1a..e7594a56cbfe3 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1862,8 +1862,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+
+ sess->last_active = jiffies;
+ sess->state = SMB2_SESSION_EXPIRED;
+- if (try_delay)
++ if (try_delay) {
++ ksmbd_conn_set_need_reconnect(conn);
+ ssleep(5);
++ ksmbd_conn_set_need_negotiate(conn);
++ }
+ }
+ }
+
+--
+2.39.2
+
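A minimal userspace sketch of the idea behind the fix above, assuming a
simplified connection model (none of these names are ksmbd's API): the
connection is flagged before the penalty sleep, so concurrent
asynchronous requests are rejected rather than processed during the
delay window.

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

enum conn_status { CONN_GOOD, CONN_NEED_RECONNECT, CONN_NEED_NEGOTIATE };

struct conn {
	_Atomic enum conn_status status;
};

/* Dispatcher side: drop requests while the delay is in force. */
static bool handle_request(struct conn *c)
{
	if (atomic_load(&c->status) == CONN_NEED_RECONNECT)
		return false;
	/* ... process the request ... */
	return true;
}

/* Failure path of session setup: flag, sleep, then re-enable. */
static void penalize_failed_setup(struct conn *c)
{
	atomic_store(&c->status, CONN_NEED_RECONNECT);
	sleep(5);	/* dictionary-attack delay */
	atomic_store(&c->status, CONN_NEED_NEGOTIATE);
}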
--- /dev/null
+From 0cdca5dae4798b02b4ac02af7c3265953efb0006 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 08:42:21 +0900
+Subject: ksmbd: destroy expired sessions
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit ea174a91893956450510945a0c5d1a10b5323656 ]
+
+A client can indefinitely send smb2 session setup requests with the
+SessionId set to 0, spawning new sessions without bound and causing
+unbounded memory usage. This patch limits the number of sessions by
+using an expiry timeout and the session state.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20478
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/mgmt/user_session.c | 68 ++++++++++++++++++++----------------
+ fs/ksmbd/mgmt/user_session.h | 1 +
+ fs/ksmbd/smb2pdu.c | 1 +
+ fs/ksmbd/smb2pdu.h | 2 ++
+ 4 files changed, 41 insertions(+), 31 deletions(-)
+
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 69b85a98e2c35..b809f7987b9f4 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -174,70 +174,73 @@ static struct ksmbd_session *__session_lookup(unsigned long long id)
+ struct ksmbd_session *sess;
+
+ hash_for_each_possible(sessions_table, sess, hlist, id) {
+- if (id == sess->id)
++ if (id == sess->id) {
++ sess->last_active = jiffies;
+ return sess;
++ }
+ }
+ return NULL;
+ }
+
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++ unsigned long id;
++ struct ksmbd_session *sess;
++
++ xa_for_each(&conn->sessions, id, sess) {
++ if (sess->state != SMB2_SESSION_VALID ||
++ time_after(jiffies,
++ sess->last_active + SMB2_SESSION_TIMEOUT)) {
++ xa_erase(&conn->sessions, sess->id);
++ ksmbd_session_destroy(sess);
++ continue;
++ }
++ }
++}
++
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess)
+ {
+ sess->dialect = conn->dialect;
+ memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++ ksmbd_expire_session(conn);
+ return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+
+-static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+ struct channel *chann;
+
+ chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+ if (!chann)
+- return -ENOENT;
++ return;
+
+ kfree(chann);
+-
+- return 0;
+ }
+
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ {
+ struct ksmbd_session *sess;
++ unsigned long id;
+
+- if (conn->binding) {
+- int bkt;
+-
+- down_write(&sessions_table_lock);
+- hash_for_each(sessions_table, bkt, sess, hlist) {
+- if (!ksmbd_chann_del(conn, sess)) {
+- up_write(&sessions_table_lock);
+- goto sess_destroy;
+- }
++ xa_for_each(&conn->sessions, id, sess) {
++ ksmbd_chann_del(conn, sess);
++ if (xa_empty(&sess->ksmbd_chann_list)) {
++ xa_erase(&conn->sessions, sess->id);
++ ksmbd_session_destroy(sess);
+ }
+- up_write(&sessions_table_lock);
+- } else {
+- unsigned long id;
+-
+- xa_for_each(&conn->sessions, id, sess) {
+- if (!ksmbd_chann_del(conn, sess))
+- goto sess_destroy;
+- }
+- }
+-
+- return;
+-
+-sess_destroy:
+- if (xa_empty(&sess->ksmbd_chann_list)) {
+- xa_erase(&conn->sessions, sess->id);
+- ksmbd_session_destroy(sess);
+ }
+ }
+
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ unsigned long long id)
+ {
+- return xa_load(&conn->sessions, id);
++ struct ksmbd_session *sess;
++
++ sess = xa_load(&conn->sessions, id);
++ if (sess)
++ sess->last_active = jiffies;
++ return sess;
+ }
+
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+@@ -246,6 +249,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+
+ down_read(&sessions_table_lock);
+ sess = __session_lookup(id);
++ if (sess)
++ sess->last_active = jiffies;
+ up_read(&sessions_table_lock);
+
+ return sess;
+@@ -324,6 +329,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ if (ksmbd_init_file_table(&sess->file_table))
+ goto error;
+
++ sess->last_active = jiffies;
+ sess->state = SMB2_SESSION_IN_PROGRESS;
+ set_session_flag(sess, protocol);
+ xa_init(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 44a3c67b2bd92..51f38e5b61abb 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -59,6 +59,7 @@ struct ksmbd_session {
+ __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+
+ struct ksmbd_file_table file_table;
++ unsigned long last_active;
+ };
+
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 6c26934272ad5..d3f33194faf1a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1860,6 +1860,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+ try_delay = true;
+
++ sess->last_active = jiffies;
+ sess->state = SMB2_SESSION_EXPIRED;
+ if (try_delay)
+ ssleep(5);
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index f4baa9800f6ee..dd10f8031606b 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -61,6 +61,8 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_IN_PROGRESS BIT(0)
+ #define SMB2_SESSION_VALID BIT(1)
+
++#define SMB2_SESSION_TIMEOUT (10 * HZ)
++
+ struct create_durable_req_v2 {
+ struct create_context ccontext;
+ __u8 Name[8];
+--
+2.39.2
+
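The expiry rule introduced above, as a standalone sketch (the 10 second
timeout mirrors SMB2_SESSION_TIMEOUT; the struct and helpers here are
illustrative, not ksmbd's types):

#include <stdbool.h>
#include <time.h>

#define SESSION_TIMEOUT_SEC 10

enum sess_state { SESS_IN_PROGRESS, SESS_VALID, SESS_EXPIRED };

struct session {
	enum sess_state state;
	time_t last_active;	/* refreshed on every successful lookup */
};

/* A session is torn down when it is no longer VALID or has been idle
 * longer than the timeout, mirroring ksmbd_expire_session(). */
static bool session_expired(const struct session *s, time_t now)
{
	return s->state != SESS_VALID ||
	       now > s->last_active + SESSION_TIMEOUT_SEC;
}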
--- /dev/null
+From fa1a6bcce011cb5882d692c8c87d6e1796bd6615 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 16:45:00 +0900
+Subject: ksmbd: fix racy issue from session setup and logoff
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit f5c779b7ddbda30866cf2a27c63e34158f858c73 ]
+
+This race is triggered by sending concurrent session setup and logoff
+requests. This patch does not set the connection status to KSMBD_SESS_GOOD
+in session setup when the state is KSMBD_SESS_NEED_RECONNECT, and
+re-looks up the session in logoff to check whether it has been deleted.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20481, ZDI-CAN-20590, ZDI-CAN-20596
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/connection.c | 14 ++++----
+ fs/ksmbd/connection.h | 39 ++++++++++++---------
+ fs/ksmbd/mgmt/user_session.c | 1 +
+ fs/ksmbd/server.c | 3 +-
+ fs/ksmbd/smb2pdu.c | 67 +++++++++++++++++++++++-------------
+ fs/ksmbd/transport_tcp.c | 2 +-
+ 6 files changed, 77 insertions(+), 49 deletions(-)
+
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index b8f9d627f241d..3cb88853d6932 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -56,7 +56,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ return NULL;
+
+ conn->need_neg = true;
+- conn->status = KSMBD_SESS_NEW;
++ ksmbd_conn_set_new(conn);
+ conn->local_nls = load_nls("utf8");
+ if (!conn->local_nls)
+ conn->local_nls = load_nls_default();
+@@ -149,12 +149,12 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ return ret;
+ }
+
+-static void ksmbd_conn_lock(struct ksmbd_conn *conn)
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
+ {
+ mutex_lock(&conn->srv_mutex);
+ }
+
+-static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ {
+ mutex_unlock(&conn->srv_mutex);
+ }
+@@ -245,7 +245,7 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+ if (!ksmbd_server_running())
+ return false;
+
+- if (conn->status == KSMBD_SESS_EXITING)
++ if (ksmbd_conn_exiting(conn))
+ return false;
+
+ if (kthread_should_stop())
+@@ -305,7 +305,7 @@ int ksmbd_conn_handler_loop(void *p)
+ pdu_size = get_rfc1002_len(hdr_buf);
+ ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+
+- if (conn->status == KSMBD_SESS_GOOD)
++ if (ksmbd_conn_good(conn))
+ max_allowed_pdu_size =
+ SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+ else
+@@ -314,7 +314,7 @@ int ksmbd_conn_handler_loop(void *p)
+ if (pdu_size > max_allowed_pdu_size) {
+ pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
+ pdu_size, max_allowed_pdu_size,
+- conn->status);
++ READ_ONCE(conn->status));
+ break;
+ }
+
+@@ -418,7 +418,7 @@ static void stop_sessions(void)
+ if (task)
+ ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+ task->comm, task_pid_nr(task));
+- conn->status = KSMBD_SESS_EXITING;
++ ksmbd_conn_set_exiting(conn);
+ if (t->ops->shutdown) {
+ read_unlock(&conn_list_lock);
+ t->ops->shutdown(t);
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 0e3a848defaf3..98bb5f199fa24 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -162,6 +162,8 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+
+ /*
+ * WARNING
+@@ -169,43 +171,48 @@ void ksmbd_conn_transport_destroy(void);
+ * This is a hack. We will move status to a proper place once we land
+ * a multi-sessions support.
+ */
+-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+ {
+- return work->conn->status == KSMBD_SESS_GOOD;
++ return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+ }
+
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ {
+- return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
++ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+- return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
++ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+ }
+
+-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ {
+- return work->conn->status == KSMBD_SESS_EXITING;
++ return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+
+-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+- work->conn->status = KSMBD_SESS_GOOD;
++ WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+ }
+
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+ {
+- work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
++ WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+ }
+
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ {
+- work->conn->status = KSMBD_SESS_NEED_RECONNECT;
++ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
+
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+ {
+- work->conn->status = KSMBD_SESS_EXITING;
++ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++ WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
+ #endif /* __CONNECTION_H__ */
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index a2b128dedcfcf..69b85a98e2c35 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -324,6 +324,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ if (ksmbd_init_file_table(&sess->file_table))
+ goto error;
+
++ sess->state = SMB2_SESSION_IN_PROGRESS;
+ set_session_flag(sess, protocol);
+ xa_init(&sess->tree_conns);
+ xa_init(&sess->ksmbd_chann_list);
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 8c2bc513445c3..8a0ad399f2456 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -93,7 +93,8 @@ static inline int check_conn_state(struct ksmbd_work *work)
+ {
+ struct smb_hdr *rsp_hdr;
+
+- if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
++ if (ksmbd_conn_exiting(work->conn) ||
++ ksmbd_conn_need_reconnect(work->conn)) {
+ rsp_hdr = work->response_buf;
+ rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+ return 1;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index ac79d4c86067f..6c26934272ad5 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -247,7 +247,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+
+ rsp = smb2_get_msg(work->response_buf);
+
+- WARN_ON(ksmbd_conn_good(work));
++ WARN_ON(ksmbd_conn_good(conn));
+
+ rsp->StructureSize = cpu_to_le16(65);
+ ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+@@ -277,7 +277,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+ conn->use_spnego = true;
+
+- ksmbd_conn_set_need_negotiate(work);
++ ksmbd_conn_set_need_negotiate(conn);
+ return 0;
+ }
+
+@@ -567,7 +567,7 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ cmd == SMB2_SESSION_SETUP_HE)
+ return 0;
+
+- if (!ksmbd_conn_good(work))
++ if (!ksmbd_conn_good(conn))
+ return -EINVAL;
+
+ sess_id = le64_to_cpu(req_hdr->SessionId);
+@@ -600,7 +600,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+
+ prev_sess->state = SMB2_SESSION_EXPIRED;
+ xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+- chann->conn->status = KSMBD_SESS_EXITING;
++ ksmbd_conn_set_exiting(chann->conn);
+ }
+
+ /**
+@@ -1067,7 +1067,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+
+ ksmbd_debug(SMB, "Received negotiate request\n");
+ conn->need_neg = false;
+- if (ksmbd_conn_good(work)) {
++ if (ksmbd_conn_good(conn)) {
+ pr_err("conn->tcp_status is already in CifsGood State\n");
+ work->send_no_response = 1;
+ return rc;
+@@ -1222,7 +1222,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ }
+
+ conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+- ksmbd_conn_set_need_negotiate(work);
++ ksmbd_conn_set_need_negotiate(conn);
+
+ err_out:
+ if (rc < 0)
+@@ -1643,6 +1643,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ rsp->SecurityBufferLength = 0;
+ inc_rfc1001_len(work->response_buf, 9);
+
++ ksmbd_conn_lock(conn);
+ if (!req->hdr.SessionId) {
+ sess = ksmbd_smb2_session_create();
+ if (!sess) {
+@@ -1690,6 +1691,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ goto out_err;
+ }
+
++ if (ksmbd_conn_need_reconnect(conn)) {
++ rc = -EFAULT;
++ sess = NULL;
++ goto out_err;
++ }
++
+ if (ksmbd_session_lookup(conn, sess_id)) {
+ rc = -EACCES;
+ goto out_err;
+@@ -1714,12 +1721,20 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ rc = -ENOENT;
+ goto out_err;
+ }
++
++ if (sess->state == SMB2_SESSION_EXPIRED) {
++ rc = -EFAULT;
++ goto out_err;
++ }
++
++ if (ksmbd_conn_need_reconnect(conn)) {
++ rc = -EFAULT;
++ sess = NULL;
++ goto out_err;
++ }
+ }
+ work->sess = sess;
+
+- if (sess->state == SMB2_SESSION_EXPIRED)
+- sess->state = SMB2_SESSION_IN_PROGRESS;
+-
+ negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+ negblob_len = le16_to_cpu(req->SecurityBufferLength);
+ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+@@ -1749,8 +1764,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ goto out_err;
+ }
+
+- ksmbd_conn_set_good(work);
+- sess->state = SMB2_SESSION_VALID;
++ if (!ksmbd_conn_need_reconnect(conn)) {
++ ksmbd_conn_set_good(conn);
++ sess->state = SMB2_SESSION_VALID;
++ }
+ kfree(sess->Preauth_HashValue);
+ sess->Preauth_HashValue = NULL;
+ } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+@@ -1772,8 +1789,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ if (rc)
+ goto out_err;
+
+- ksmbd_conn_set_good(work);
+- sess->state = SMB2_SESSION_VALID;
++ if (!ksmbd_conn_need_reconnect(conn)) {
++ ksmbd_conn_set_good(conn);
++ sess->state = SMB2_SESSION_VALID;
++ }
+ if (conn->binding) {
+ struct preauth_session *preauth_sess;
+
+@@ -1841,14 +1860,13 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+ try_delay = true;
+
+- xa_erase(&conn->sessions, sess->id);
+- ksmbd_session_destroy(sess);
+- work->sess = NULL;
++ sess->state = SMB2_SESSION_EXPIRED;
+ if (try_delay)
+ ssleep(5);
+ }
+ }
+
++ ksmbd_conn_unlock(conn);
+ return rc;
+ }
+
+@@ -2073,21 +2091,24 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
+ struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+- struct ksmbd_session *sess = work->sess;
++ struct ksmbd_session *sess;
++ struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+
+ rsp->StructureSize = cpu_to_le16(4);
+ inc_rfc1001_len(work->response_buf, 4);
+
+ ksmbd_debug(SMB, "request\n");
+
+- /* setting CifsExiting here may race with start_tcp_sess */
+- ksmbd_conn_set_need_reconnect(work);
++ ksmbd_conn_set_need_reconnect(conn);
+ ksmbd_close_session_fds(work);
+ ksmbd_conn_wait_idle(conn);
+
++ /*
++ * Re-lookup session to validate if session is deleted
++ * while waiting request complete
++ */
++ sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
+ if (ksmbd_tree_conn_session_logoff(sess)) {
+- struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-
+ ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ smb2_set_err_rsp(work);
+@@ -2099,9 +2120,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+-
+- /* let start_tcp_sess free connection info now */
+- ksmbd_conn_set_need_negotiate(work);
++ ksmbd_conn_set_need_negotiate(conn);
+ return 0;
+ }
+
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 20e85e2701f26..eff7a1d793f00 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -333,7 +333,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ if (length == -EINTR) {
+ total_read = -ESHUTDOWN;
+ break;
+- } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
++ } else if (ksmbd_conn_need_reconnect(conn)) {
+ total_read = -EAGAIN;
+ break;
+ } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+--
+2.39.2
+
--- /dev/null
+From 0f2604381fae52696888feb2fa9621d3e5ee8d43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 14:03:40 +0900
+Subject: ksmbd: fix racy issue from smb2 close and logoff with multichannel
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit abcc506a9a71976a8b4c9bf3ee6efd13229c1e19 ]
+
+When an smb client sends concurrent smb2 close and logoff requests
+over a multichannel connection, a race can occur: the logoff request
+frees the tcon and can cause use-after-free issues in smb2 close. When
+receiving a logoff request with multichannel, ksmbd should wait until
+all remaining requests, as well as those on the current connection,
+complete, and only then mark the session expired.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20796 ZDI-CAN-20595
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/connection.c | 54 +++++++++++++++++++++++++++---------
+ fs/ksmbd/connection.h | 19 +++++++++++--
+ fs/ksmbd/mgmt/tree_connect.c | 3 ++
+ fs/ksmbd/mgmt/user_session.c | 36 ++++++++++++++++++++----
+ fs/ksmbd/smb2pdu.c | 21 +++++++-------
+ 5 files changed, 101 insertions(+), 32 deletions(-)
+
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 3cb88853d6932..e3312fbf4c090 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
+ static struct ksmbd_conn_ops default_conn_ops;
+
+ LIST_HEAD(conn_list);
+-DEFINE_RWLOCK(conn_list_lock);
++DECLARE_RWSEM(conn_list_lock);
+
+ /**
+ * ksmbd_conn_free() - free resources of the connection instance
+@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
+ */
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+- write_lock(&conn_list_lock);
++ down_write(&conn_list_lock);
+ list_del(&conn->conns_list);
+- write_unlock(&conn_list_lock);
++ up_write(&conn_list_lock);
+
+ xa_destroy(&conn->sessions);
+ kvfree(conn->request_buf);
+@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ spin_lock_init(&conn->llist_lock);
+ INIT_LIST_HEAD(&conn->lock_list);
+
+- write_lock(&conn_list_lock);
++ down_write(&conn_list_lock);
+ list_add(&conn->conns_list, &conn_list);
+- write_unlock(&conn_list_lock);
++ up_write(&conn_list_lock);
+ return conn;
+ }
+
+@@ -95,7 +95,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ struct ksmbd_conn *t;
+ bool ret = false;
+
+- read_lock(&conn_list_lock);
++ down_read(&conn_list_lock);
+ list_for_each_entry(t, &conn_list, conns_list) {
+ if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+ continue;
+@@ -103,7 +103,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ ret = true;
+ break;
+ }
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ return ret;
+ }
+
+@@ -159,9 +159,37 @@ void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ mutex_unlock(&conn->srv_mutex);
+ }
+
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
++ struct ksmbd_conn *conn;
++
++ down_read(&conn_list_lock);
++ list_for_each_entry(conn, &conn_list, conns_list) {
++ if (conn->binding || xa_load(&conn->sessions, sess_id))
++ WRITE_ONCE(conn->status, status);
++ }
++ up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++ struct ksmbd_conn *bind_conn;
++
+ wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++ down_read(&conn_list_lock);
++ list_for_each_entry(bind_conn, &conn_list, conns_list) {
++ if (bind_conn == conn)
++ continue;
++
++ if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++ !ksmbd_conn_releasing(bind_conn) &&
++ atomic_read(&bind_conn->req_running)) {
++ wait_event(bind_conn->req_running_q,
++ atomic_read(&bind_conn->req_running) == 0);
++ }
++ }
++ up_read(&conn_list_lock);
+ }
+
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -362,10 +390,10 @@ int ksmbd_conn_handler_loop(void *p)
+ }
+
+ out:
++ ksmbd_conn_set_releasing(conn);
+ /* Wait till all reference dropped to the Server object*/
+ wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+
+-
+ if (IS_ENABLED(CONFIG_UNICODE))
+ utf8_unload(conn->um);
+ unload_nls(conn->local_nls);
+@@ -409,7 +437,7 @@ static void stop_sessions(void)
+ struct ksmbd_transport *t;
+
+ again:
+- read_lock(&conn_list_lock);
++ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list) {
+ struct task_struct *task;
+
+@@ -420,12 +448,12 @@ static void stop_sessions(void)
+ task->comm, task_pid_nr(task));
+ ksmbd_conn_set_exiting(conn);
+ if (t->ops->shutdown) {
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ t->ops->shutdown(t);
+- read_lock(&conn_list_lock);
++ down_read(&conn_list_lock);
+ }
+ }
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+
+ if (!list_empty(&conn_list)) {
+ schedule_timeout_interruptible(HZ / 10); /* 100ms */
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 98bb5f199fa24..ad8dfaa48ffb3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -26,7 +26,8 @@ enum {
+ KSMBD_SESS_GOOD,
+ KSMBD_SESS_EXITING,
+ KSMBD_SESS_NEED_RECONNECT,
+- KSMBD_SESS_NEED_NEGOTIATE
++ KSMBD_SESS_NEED_NEGOTIATE,
++ KSMBD_SESS_RELEASING
+ };
+
+ struct ksmbd_stats {
+@@ -140,10 +141,10 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
+
+ extern struct list_head conn_list;
+-extern rwlock_t conn_list_lock;
++extern struct rw_semaphore conn_list_lock;
+
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+@@ -191,6 +192,11 @@ static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
++{
++ return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
++}
++
+ static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+ WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+@@ -215,4 +221,11 @@ static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
+ {
+ WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++ WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+ #endif /* __CONNECTION_H__ */
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index f19de20c2960c..f07a05f376513 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -137,6 +137,9 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+ struct ksmbd_tree_connect *tc;
+ unsigned long id;
+
++ if (!sess)
++ return -EINVAL;
++
+ xa_for_each(&sess->tree_conns, id, tc)
+ ret |= ksmbd_tree_conn_disconnect(sess, tc);
+ xa_destroy(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index b809f7987b9f4..ea4b56d570fbb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -153,10 +153,6 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
+ if (!sess)
+ return;
+
+- down_write(&sessions_table_lock);
+- hash_del(&sess->hlist);
+- up_write(&sessions_table_lock);
+-
+ if (sess->user)
+ ksmbd_free_user(sess->user);
+
+@@ -187,15 +183,18 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ unsigned long id;
+ struct ksmbd_session *sess;
+
++ down_write(&sessions_table_lock);
+ xa_for_each(&conn->sessions, id, sess) {
+ if (sess->state != SMB2_SESSION_VALID ||
+ time_after(jiffies,
+ sess->last_active + SMB2_SESSION_TIMEOUT)) {
+ xa_erase(&conn->sessions, sess->id);
++ hash_del(&sess->hlist);
+ ksmbd_session_destroy(sess);
+ continue;
+ }
+ }
++ up_write(&sessions_table_lock);
+ }
+
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -207,15 +206,16 @@ int ksmbd_session_register(struct ksmbd_conn *conn,
+ return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+
+-static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+ struct channel *chann;
+
+ chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+ if (!chann)
+- return;
++ return -ENOENT;
+
+ kfree(chann);
++ return 0;
+ }
+
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+@@ -223,13 +223,37 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ struct ksmbd_session *sess;
+ unsigned long id;
+
++ down_write(&sessions_table_lock);
++ if (conn->binding) {
++ int bkt;
++ struct hlist_node *tmp;
++
++ hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++ if (!ksmbd_chann_del(conn, sess) &&
++ xa_empty(&sess->ksmbd_chann_list)) {
++ hash_del(&sess->hlist);
++ ksmbd_session_destroy(sess);
++ }
++ }
++ }
++
+ xa_for_each(&conn->sessions, id, sess) {
++ unsigned long chann_id;
++ struct channel *chann;
++
++ xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++ if (chann->conn != conn)
++ ksmbd_conn_set_exiting(chann->conn);
++ }
++
+ ksmbd_chann_del(conn, sess);
+ if (xa_empty(&sess->ksmbd_chann_list)) {
+ xa_erase(&conn->sessions, sess->id);
++ hash_del(&sess->hlist);
+ ksmbd_session_destroy(sess);
+ }
+ }
++ up_write(&sessions_table_lock);
+ }
+
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index e7594a56cbfe3..8f96b96dbac1a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2097,21 +2097,22 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct ksmbd_session *sess;
+ struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++ u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+
+ rsp->StructureSize = cpu_to_le16(4);
+ inc_rfc1001_len(work->response_buf, 4);
+
+ ksmbd_debug(SMB, "request\n");
+
+- ksmbd_conn_set_need_reconnect(conn);
++ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+ ksmbd_close_session_fds(work);
+- ksmbd_conn_wait_idle(conn);
++ ksmbd_conn_wait_idle(conn, sess_id);
+
+ /*
+ * Re-lookup session to validate if session is deleted
+ * while waiting request complete
+ */
+- sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
++ sess = ksmbd_session_lookup_all(conn, sess_id);
+ if (ksmbd_tree_conn_session_logoff(sess)) {
+ ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+@@ -2124,7 +2125,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+- ksmbd_conn_set_need_negotiate(conn);
++ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ return 0;
+ }
+
+@@ -6952,7 +6953,7 @@ int smb2_lock(struct ksmbd_work *work)
+
+ nolock = 1;
+ /* check locks in connection list */
+- read_lock(&conn_list_lock);
++ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list) {
+ spin_lock(&conn->llist_lock);
+ list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+@@ -6969,7 +6970,7 @@ int smb2_lock(struct ksmbd_work *work)
+ list_del(&cmp_lock->flist);
+ list_del(&cmp_lock->clist);
+ spin_unlock(&conn->llist_lock);
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+
+ locks_free_lock(cmp_lock->fl);
+ kfree(cmp_lock);
+@@ -6991,7 +6992,7 @@ int smb2_lock(struct ksmbd_work *work)
+ cmp_lock->start > smb_lock->start &&
+ cmp_lock->start < smb_lock->end) {
+ spin_unlock(&conn->llist_lock);
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ pr_err("previous lock conflict with zero byte lock range\n");
+ goto out;
+ }
+@@ -7000,7 +7001,7 @@ int smb2_lock(struct ksmbd_work *work)
+ smb_lock->start > cmp_lock->start &&
+ smb_lock->start < cmp_lock->end) {
+ spin_unlock(&conn->llist_lock);
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ pr_err("current lock conflict with zero byte lock range\n");
+ goto out;
+ }
+@@ -7011,14 +7012,14 @@ int smb2_lock(struct ksmbd_work *work)
+ cmp_lock->end >= smb_lock->end)) &&
+ !cmp_lock->zero_len && !smb_lock->zero_len) {
+ spin_unlock(&conn->llist_lock);
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ pr_err("Not allow lock operation on exclusive lock range\n");
+ goto out;
+ }
+ }
+ spin_unlock(&conn->llist_lock);
+ }
+- read_unlock(&conn_list_lock);
++ up_read(&conn_list_lock);
+ out_check_cl:
+ if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+ pr_err("Try to unlock nolocked range\n");
+--
+2.39.2
+
--- /dev/null
+From 229a5995ac96f843dc3b5772a39e427a136fc363 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Jan 2023 18:32:04 +0800
+Subject: ksmbd: Implements sess->ksmbd_chann_list as xarray
+
+From: Dawei Li <set_pte_at@outlook.com>
+
+[ Upstream commit 1d9c4172110e645b383ff13eee759728d74f1a5d ]
+
+For some operations on a channel:
+1. lookup_chann_list(), possibly called at high frequency.
+2. ksmbd_chann_del().
+
+The connection is used as the indexing key to look up the channel, and
+in that case a linear search over a list suffers a bit in performance.
+
+Implement sess->ksmbd_chann_list as an xarray.
+
+Signed-off-by: Dawei Li <set_pte_at@outlook.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: f5c779b7ddbd ("ksmbd: fix racy issue from session setup and logoff")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/mgmt/user_session.c | 61 ++++++++++++++----------------------
+ fs/ksmbd/mgmt/user_session.h | 4 +--
+ fs/ksmbd/smb2pdu.c | 34 +++-----------------
+ 3 files changed, 30 insertions(+), 69 deletions(-)
+
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 92b1603b5abeb..a2b128dedcfcf 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -30,15 +30,15 @@ struct ksmbd_session_rpc {
+
+ static void free_channel_list(struct ksmbd_session *sess)
+ {
+- struct channel *chann, *tmp;
++ struct channel *chann;
++ unsigned long index;
+
+- write_lock(&sess->chann_lock);
+- list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+- chann_list) {
+- list_del(&chann->chann_list);
++ xa_for_each(&sess->ksmbd_chann_list, index, chann) {
++ xa_erase(&sess->ksmbd_chann_list, index);
+ kfree(chann);
+ }
+- write_unlock(&sess->chann_lock);
++
++ xa_destroy(&sess->ksmbd_chann_list);
+ }
+
+ static void __session_rpc_close(struct ksmbd_session *sess,
+@@ -190,21 +190,15 @@ int ksmbd_session_register(struct ksmbd_conn *conn,
+
+ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+- struct channel *chann, *tmp;
+-
+- write_lock(&sess->chann_lock);
+- list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+- chann_list) {
+- if (chann->conn == conn) {
+- list_del(&chann->chann_list);
+- kfree(chann);
+- write_unlock(&sess->chann_lock);
+- return 0;
+- }
+- }
+- write_unlock(&sess->chann_lock);
++ struct channel *chann;
++
++ chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++ if (!chann)
++ return -ENOENT;
+
+- return -ENOENT;
++ kfree(chann);
++
++ return 0;
+ }
+
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+@@ -234,7 +228,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ return;
+
+ sess_destroy:
+- if (list_empty(&sess->ksmbd_chann_list)) {
++ if (xa_empty(&sess->ksmbd_chann_list)) {
+ xa_erase(&conn->sessions, sess->id);
+ ksmbd_session_destroy(sess);
+ }
+@@ -320,6 +314,9 @@ static struct ksmbd_session *__session_create(int protocol)
+ struct ksmbd_session *sess;
+ int ret;
+
++ if (protocol != CIFDS_SESSION_FLAG_SMB2)
++ return NULL;
++
+ sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+ if (!sess)
+ return NULL;
+@@ -329,30 +326,20 @@ static struct ksmbd_session *__session_create(int protocol)
+
+ set_session_flag(sess, protocol);
+ xa_init(&sess->tree_conns);
+- INIT_LIST_HEAD(&sess->ksmbd_chann_list);
++ xa_init(&sess->ksmbd_chann_list);
+ INIT_LIST_HEAD(&sess->rpc_handle_list);
+ sess->sequence_number = 1;
+- rwlock_init(&sess->chann_lock);
+-
+- switch (protocol) {
+- case CIFDS_SESSION_FLAG_SMB2:
+- ret = __init_smb2_session(sess);
+- break;
+- default:
+- ret = -EINVAL;
+- break;
+- }
+
++ ret = __init_smb2_session(sess);
+ if (ret)
+ goto error;
+
+ ida_init(&sess->tree_conn_ida);
+
+- if (protocol == CIFDS_SESSION_FLAG_SMB2) {
+- down_write(&sessions_table_lock);
+- hash_add(sessions_table, &sess->hlist, sess->id);
+- up_write(&sessions_table_lock);
+- }
++ down_write(&sessions_table_lock);
++ hash_add(sessions_table, &sess->hlist, sess->id);
++ up_write(&sessions_table_lock);
++
+ return sess;
+
+ error:
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 8934b8ee275ba..44a3c67b2bd92 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -21,7 +21,6 @@ struct ksmbd_file_table;
+ struct channel {
+ __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ struct ksmbd_conn *conn;
+- struct list_head chann_list;
+ };
+
+ struct preauth_session {
+@@ -50,8 +49,7 @@ struct ksmbd_session {
+ char sess_key[CIFS_KEY_SIZE];
+
+ struct hlist_node hlist;
+- rwlock_t chann_lock;
+- struct list_head ksmbd_chann_list;
++ struct xarray ksmbd_chann_list;
+ struct xarray tree_conns;
+ struct ida tree_conn_ida;
+ struct list_head rpc_handle_list;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index acd66fb40c5f0..ac79d4c86067f 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -74,14 +74,7 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+
+ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+ {
+- struct channel *chann;
+-
+- list_for_each_entry(chann, &sess->ksmbd_chann_list, chann_list) {
+- if (chann->conn == conn)
+- return chann;
+- }
+-
+- return NULL;
++ return xa_load(&sess->ksmbd_chann_list, (long)conn);
+ }
+
+ /**
+@@ -592,6 +585,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+ struct ksmbd_user *prev_user;
+ struct channel *chann;
++ long index;
+
+ if (!prev_sess)
+ return;
+@@ -605,10 +599,8 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ return;
+
+ prev_sess->state = SMB2_SESSION_EXPIRED;
+- write_lock(&prev_sess->chann_lock);
+- list_for_each_entry(chann, &prev_sess->ksmbd_chann_list, chann_list)
++ xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+ chann->conn->status = KSMBD_SESS_EXITING;
+- write_unlock(&prev_sess->chann_lock);
+ }
+
+ /**
+@@ -1520,19 +1512,14 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+
+ binding_session:
+ if (conn->dialect >= SMB30_PROT_ID) {
+- read_lock(&sess->chann_lock);
+ chann = lookup_chann_list(sess, conn);
+- read_unlock(&sess->chann_lock);
+ if (!chann) {
+ chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ if (!chann)
+ return -ENOMEM;
+
+ chann->conn = conn;
+- INIT_LIST_HEAD(&chann->chann_list);
+- write_lock(&sess->chann_lock);
+- list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+- write_unlock(&sess->chann_lock);
++ xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ }
+ }
+
+@@ -1606,19 +1593,14 @@ static int krb5_authenticate(struct ksmbd_work *work)
+ }
+
+ if (conn->dialect >= SMB30_PROT_ID) {
+- read_lock(&sess->chann_lock);
+ chann = lookup_chann_list(sess, conn);
+- read_unlock(&sess->chann_lock);
+ if (!chann) {
+ chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ if (!chann)
+ return -ENOMEM;
+
+ chann->conn = conn;
+- INIT_LIST_HEAD(&chann->chann_list);
+- write_lock(&sess->chann_lock);
+- list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+- write_unlock(&sess->chann_lock);
++ xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ }
+ }
+
+@@ -8428,14 +8410,11 @@ int smb3_check_sign_req(struct ksmbd_work *work)
+ if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ signing_key = work->sess->smb3signingkey;
+ } else {
+- read_lock(&work->sess->chann_lock);
+ chann = lookup_chann_list(work->sess, conn);
+ if (!chann) {
+- read_unlock(&work->sess->chann_lock);
+ return 0;
+ }
+ signing_key = chann->smb3signingkey;
+- read_unlock(&work->sess->chann_lock);
+ }
+
+ if (!signing_key) {
+@@ -8495,14 +8474,11 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
+ le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ signing_key = work->sess->smb3signingkey;
+ } else {
+- read_lock(&work->sess->chann_lock);
+ chann = lookup_chann_list(work->sess, work->conn);
+ if (!chann) {
+- read_unlock(&work->sess->chann_lock);
+ return;
+ }
+ signing_key = chann->smb3signingkey;
+- read_unlock(&work->sess->chann_lock);
+ }
+
+ if (!signing_key)
+--
+2.39.2
+
fs-ntfs3-fix-null-ptr-deref-on-inode-i_op-in-ntfs_lookup.patch
fs-ntfs3-refactoring-of-various-minor-issues.patch
drm-msm-adreno-adreno_gpu-use-suspend-instead-of-idle-on-load-error.patch
+f2fs-specify-extent-cache-for-read-explicitly.patch
+f2fs-move-internal-functions-into-extent_cache.c.patch
+f2fs-remove-unnecessary-__init_extent_tree.patch
+f2fs-refactor-extent_cache-to-support-for-read-and-m.patch
+f2fs-allocate-the-extent_cache-by-default.patch
+f2fs-factor-out-victim_entry-usage-from-general-rb_t.patch
+drm-msm-adreno-simplify-read64-write64-helpers.patch
+drm-msm-hangcheck-progress-detection.patch
+drm-msm-fix-missing-wq-allocation-error-handling.patch
+irqchip-loongarch-adjust-acpi_cascade_irqdomain_init.patch
+irqchip-loongson-eiointc-fix-incorrect-use-of-acpi_g.patch
+irqchip-loongson-eiointc-fix-registration-of-syscore.patch
+wifi-rtw88-rtw8821c-fix-rfe_option-field-width.patch
+drm-i915-mtl-update-scaler-source-and-destination-li.patch
+drm-i915-check-pipe-source-size-when-using-skl-scale.patch
+drm-amd-display-refactor-edp-psr-codes.patch
+drm-amd-display-add-z8-allow-states-to-z-state-suppo.patch
+drm-amd-display-add-debug-option-to-skip-psr-crtc-di.patch
+drm-amd-display-fix-z8-support-configurations.patch
+drm-amd-display-add-minimum-z8-residency-debug-optio.patch
+drm-amd-display-update-minimum-stutter-residency-for.patch
+drm-amd-display-lowering-min-z8-residency-time.patch
+asoc-rt1318-add-rt1318-sdca-vendor-specific-driver.patch
+asoc-codecs-constify-static-sdw_slave_ops-struct.patch
+asoc-codecs-wcd938x-fix-accessing-regmap-on-unattach.patch
+drm-amd-display-update-z8-watermarks-for-dcn314.patch
+drm-amd-display-update-z8-sr-exit-enter-latencies.patch
+drm-amd-display-change-default-z8-watermark-values.patch
+ksmbd-implements-sess-ksmbd_chann_list-as-xarray.patch
+ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
+ksmbd-destroy-expired-sessions.patch
+ksmbd-block-asynchronous-requests-when-making-a-dela.patch
+ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with.patch
+drm-add-missing-dp-dsc-extended-capability-definitio.patch
+drm-dsc-fix-drm_edp_dsc_sink_output_bpp-dpcd-high-by.patch
--- /dev/null
+From 053c94114ec276f1bedf33fcd527c37e6ca0b644 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Apr 2023 16:03:56 +0200
+Subject: wifi: rtw88: rtw8821c: Fix rfe_option field width
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit 14705f969d98187a1cc2682e0c9bd2e230b8098f ]
+
+On my RTW8821CU chipset rfe_option reads as 0x22. Looking at the
+vendor driver suggests that the field width of rfe_option is 5 bit,
+so rfe_option should be masked with 0x1f.
+
+Without this the rfe_option comparisons with 2 further down the
+driver evaluate as false when they should really evaluate as true.
+The effect is that 2G channels do not work.
+
+rfe_option is also used as an array index into rtw8821c_rfe_defs[].
+rtw8821c_rfe_defs[34] (0x22) was added as part of adding USB support,
+likely because rfe_option reads as 0x22. As this now becomes 0x2,
+rtw8821c_rfe_defs[34] is no longer used and can be removed.
+
+Note that this might not be the whole truth. In the vendor driver
+there are indeed places where the unmasked rfe_option value is used.
+However, the driver has several places where rfe_option is tested
+with the pattern if (rfe_option == 2 || rfe_option == 0x22) or
+if (rfe_option == 4 || rfe_option == 0x24), so that rfe_option BIT(5)
+has no influence on the code path taken. We therefore mask BIT(5)
+out from rfe_option entirely until this assumption is proved wrong
+by some chip variant we do not know yet.
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Tested-by: Alexandru gagniuc <mr.nuke.me@gmail.com>
+Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
+Tested-by: ValdikSS <iam@valdikss.org.ru>
+Cc: stable@vger.kernel.org
+Reviewed-by: Ping-Ke Shih <pkshih@realtek.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20230417140358.2240429-3-s.hauer@pengutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/realtek/rtw88/rtw8821c.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index 9afdc5ce86b43..609a2b86330d8 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -41,7 +41,7 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+
+ map = (struct rtw8821c_efuse *)log_map;
+
+- efuse->rfe_option = map->rfe_option;
++ efuse->rfe_option = map->rfe_option & 0x1f;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+--
+2.39.2
+
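The masking arithmetic described in the commit message, as a standalone
sketch (only the 0x1f mask comes from the patch; the rest is
illustrative):

#include <stdio.h>

#define RFE_OPTION_MASK 0x1f	/* assumed 5-bit field width */

int main(void)
{
	unsigned char raw = 0x22;	/* value read from efuse */
	unsigned char rfe_option = raw & RFE_OPTION_MASK;

	/* prints raw=0x22 masked=0x02, so rtw8821c_rfe_defs[2] is used */
	printf("raw=0x%02x masked=0x%02x\n", raw, rfe_option);
	return 0;
}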