--- /dev/null
+From c2c3f047d87c4b2b84c30275ecb7807f5aee1f40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 May 2021 11:36:58 -0500
+Subject: ASoC: Intel: boards: create sof-maxim-common module
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit 9c5046e4b3e736eec5b9a8f1d59c07bb0ed78a7a ]
+
+sof_maxim_common.o is linked twice, move to a dedicated module.
+
+Also clean-up interfaces to use a consistent 'max_98373' prefix for
+all symbols.
+
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+Link: https://lore.kernel.org/r/20210505163705.305616-7-pierre-louis.bossart@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/Kconfig | 5 +++++
+ sound/soc/intel/boards/Makefile | 6 ++++--
+ sound/soc/intel/boards/sof_maxim_common.c | 24 ++++++++++++++++-------
+ sound/soc/intel/boards/sof_maxim_common.h | 6 +++---
+ sound/soc/intel/boards/sof_rt5682.c | 5 +++--
+ sound/soc/intel/boards/sof_sdw.c | 1 +
+ sound/soc/intel/boards/sof_sdw_max98373.c | 4 ++--
+ 7 files changed, 35 insertions(+), 16 deletions(-)
+
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index ec4d754eb348..ceeb618bd950 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -29,6 +29,9 @@ config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
+ config SND_SOC_INTEL_HDA_DSP_COMMON
+ tristate
+
++config SND_SOC_INTEL_SOF_MAXIM_COMMON
++ tristate
++
+ if SND_SOC_INTEL_CATPT
+
+ config SND_SOC_INTEL_HASWELL_MACH
+@@ -469,6 +472,7 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_HDA_DSP_COMMON
++ select SND_SOC_INTEL_SOF_MAXIM_COMMON
+ help
+ This adds support for ASoC machine driver for SOF platforms
+ with rt5682 codec.
+@@ -579,6 +583,7 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
+ select SND_SOC_RT5682_SDW
+ select SND_SOC_DMIC
+ select SND_SOC_INTEL_HDA_DSP_COMMON
++ select SND_SOC_INTEL_SOF_MAXIM_COMMON
+ help
+ Add support for Intel SoundWire-based platforms connected to
+ MAX98373, RT700, RT711, RT1308 and RT715
+diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
+index a48ee9b74e73..855296e8dfb8 100644
+--- a/sound/soc/intel/boards/Makefile
++++ b/sound/soc/intel/boards/Makefile
+@@ -19,7 +19,7 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
+ snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
+ snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
+ snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
+-snd-soc-sof_rt5682-objs := sof_rt5682.o sof_maxim_common.o sof_realtek_common.o
++snd-soc-sof_rt5682-objs := sof_rt5682.o sof_realtek_common.o
+ snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o
+ snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
+ snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
+@@ -38,7 +38,6 @@ snd-soc-sof-sdw-objs += sof_sdw.o \
+ sof_sdw_rt5682.o sof_sdw_rt700.o \
+ sof_sdw_rt711.o sof_sdw_rt711_sdca.o \
+ sof_sdw_rt715.o sof_sdw_rt715_sdca.o \
+- sof_maxim_common.o \
+ sof_sdw_dmic.o sof_sdw_hdmi.o
+ obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
+ obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
+@@ -78,3 +77,6 @@ obj-$(CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH) += snd-soc-sof-sdw.o
+ # common modules
+ snd-soc-intel-hda-dsp-common-objs := hda_dsp_common.o
+ obj-$(CONFIG_SND_SOC_INTEL_HDA_DSP_COMMON) += snd-soc-intel-hda-dsp-common.o
++
++snd-soc-intel-sof-maxim-common-objs += sof_maxim_common.o
++obj-$(CONFIG_SND_SOC_INTEL_SOF_MAXIM_COMMON) += snd-soc-intel-sof-maxim-common.o
+diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c
+index 437d20562753..7c4af6ec58e8 100644
+--- a/sound/soc/intel/boards/sof_maxim_common.c
++++ b/sound/soc/intel/boards/sof_maxim_common.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ //
+ // Copyright(c) 2020 Intel Corporation. All rights reserved.
++#include <linux/module.h>
+ #include <linux/string.h>
+ #include <sound/pcm.h>
+ #include <sound/soc.h>
+@@ -16,6 +17,7 @@ const struct snd_soc_dapm_route max_98373_dapm_routes[] = {
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+ };
++EXPORT_SYMBOL_NS(max_98373_dapm_routes, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+ static struct snd_soc_codec_conf max_98373_codec_conf[] = {
+ {
+@@ -38,9 +40,10 @@ struct snd_soc_dai_link_component max_98373_components[] = {
+ .dai_name = MAX_98373_CODEC_DAI,
+ },
+ };
++EXPORT_SYMBOL_NS(max_98373_components, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+-static int max98373_hw_params(struct snd_pcm_substream *substream,
+- struct snd_pcm_hw_params *params)
++static int max_98373_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
+ {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+@@ -59,7 +62,7 @@ static int max98373_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
++int max_98373_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+@@ -102,13 +105,15 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
+
+ return ret;
+ }
++EXPORT_SYMBOL_NS(max_98373_trigger, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+ struct snd_soc_ops max_98373_ops = {
+- .hw_params = max98373_hw_params,
+- .trigger = max98373_trigger,
++ .hw_params = max_98373_hw_params,
++ .trigger = max_98373_trigger,
+ };
++EXPORT_SYMBOL_NS(max_98373_ops, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+-int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
++int max_98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+@@ -119,9 +124,14 @@ int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
+ dev_err(rtd->dev, "Speaker map addition failed: %d\n", ret);
+ return ret;
+ }
++EXPORT_SYMBOL_NS(max_98373_spk_codec_init, SND_SOC_INTEL_SOF_MAXIM_COMMON);
+
+-void sof_max98373_codec_conf(struct snd_soc_card *card)
++void max_98373_set_codec_conf(struct snd_soc_card *card)
+ {
+ card->codec_conf = max_98373_codec_conf;
+ card->num_configs = ARRAY_SIZE(max_98373_codec_conf);
+ }
++EXPORT_SYMBOL_NS(max_98373_set_codec_conf, SND_SOC_INTEL_SOF_MAXIM_COMMON);
++
++MODULE_DESCRIPTION("ASoC Intel SOF Maxim helpers");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/intel/boards/sof_maxim_common.h b/sound/soc/intel/boards/sof_maxim_common.h
+index 5240b1c9d379..566a664d5a63 100644
+--- a/sound/soc/intel/boards/sof_maxim_common.h
++++ b/sound/soc/intel/boards/sof_maxim_common.h
+@@ -20,8 +20,8 @@ extern struct snd_soc_dai_link_component max_98373_components[2];
+ extern struct snd_soc_ops max_98373_ops;
+ extern const struct snd_soc_dapm_route max_98373_dapm_routes[];
+
+-int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd);
+-void sof_max98373_codec_conf(struct snd_soc_card *card);
+-int max98373_trigger(struct snd_pcm_substream *substream, int cmd);
++int max_98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd);
++void max_98373_set_codec_conf(struct snd_soc_card *card);
++int max_98373_trigger(struct snd_pcm_substream *substream, int cmd);
+
+ #endif /* __SOF_MAXIM_COMMON_H */
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 52401cc0de92..78262c659983 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -742,7 +742,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ SOF_MAX98373_SPEAKER_AMP_PRESENT) {
+ links[id].codecs = max_98373_components;
+ links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+- links[id].init = max98373_spk_codec_init;
++ links[id].init = max_98373_spk_codec_init;
+ links[id].ops = &max_98373_ops;
+ /* feedback stream */
+ links[id].dpcm_capture = 1;
+@@ -863,7 +863,7 @@ static int sof_audio_probe(struct platform_device *pdev)
+ sof_audio_card_rt5682.num_links++;
+
+ if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT)
+- sof_max98373_codec_conf(&sof_audio_card_rt5682);
++ max_98373_set_codec_conf(&sof_audio_card_rt5682);
+ else if (sof_rt5682_quirk & SOF_RT1011_SPEAKER_AMP_PRESENT)
+ sof_rt1011_codec_conf(&sof_audio_card_rt5682);
+ else if (sof_rt5682_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT)
+@@ -995,3 +995,4 @@ MODULE_ALIAS("platform:cml_rt1015_rt5682");
+ MODULE_ALIAS("platform:tgl_rt1011_rt5682");
+ MODULE_ALIAS("platform:jsl_rt5682_rt1015p");
+ MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
++MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 5e4d26e9bb7d..3ca7e1ab48ab 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -1319,3 +1319,4 @@ MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:sof_sdw");
+ MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
++MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
+diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c
+index cfdf970c5800..0e7ed906b341 100644
+--- a/sound/soc/intel/boards/sof_sdw_max98373.c
++++ b/sound/soc/intel/boards/sof_sdw_max98373.c
+@@ -64,7 +64,7 @@ static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* enable max98373 first */
+- ret = max98373_trigger(substream, cmd);
++ ret = max_98373_trigger(substream, cmd);
+ if (ret < 0)
+ break;
+
+@@ -77,7 +77,7 @@ static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+ if (ret < 0)
+ break;
+
+- ret = max98373_trigger(substream, cmd);
++ ret = max_98373_trigger(substream, cmd);
+ break;
+ default:
+ ret = -EINVAL;
+--
+2.30.2
+
--- /dev/null
+From 0db3d307cc7a5cd91d70ee5efeb69c5f97e5da1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jun 2021 15:50:39 -0500
+Subject: ASoC: Intel: boards: fix xrun issue on platform with max98373
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rander Wang <rander.wang@intel.com>
+
+[ Upstream commit 33c8516841ea4fa12fdb8961711bf95095c607ee ]
+
+On TGL platform with max98373 codec the trigger start sequence is
+fe first, then codec component and sdw link is the last. Recently
+a delay was introduced in max98373 codec driver and this resulted
+to the start of sdw stream transmission was delayed and the data
+transmitted by fw can't be consumed by sdw controller, so xrun happened.
+
+Adding delay in trigger function is a bad idea. This patch enable spk
+pin in prepare function and disable it in hw_free to avoid xrun issue
+caused by delay in trigger.
+
+Fixes: 3a27875e91fb ("ASoC: max98373: Added 30ms turn on/off time delay")
+BugLink: https://github.com/thesofproject/sof/issues/4066
+Reviewed-by: Bard Liao <bard.liao@intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Signed-off-by: Rander Wang <rander.wang@intel.com>
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://lore.kernel.org/r/20210625205042.65181-2-pierre-louis.bossart@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/sof_sdw_max98373.c | 81 +++++++++++++++--------
+ 1 file changed, 53 insertions(+), 28 deletions(-)
+
+diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c
+index 0e7ed906b341..25daef910aee 100644
+--- a/sound/soc/intel/boards/sof_sdw_max98373.c
++++ b/sound/soc/intel/boards/sof_sdw_max98373.c
+@@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd)
+ return ret;
+ }
+
+-static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
++static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable)
+ {
++ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_dai *codec_dai;
++ struct snd_soc_dai *cpu_dai;
+ int ret;
++ int j;
+
+- switch (cmd) {
+- case SNDRV_PCM_TRIGGER_START:
+- case SNDRV_PCM_TRIGGER_RESUME:
+- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+- /* enable max98373 first */
+- ret = max_98373_trigger(substream, cmd);
+- if (ret < 0)
+- break;
+-
+- ret = sdw_trigger(substream, cmd);
+- break;
+- case SNDRV_PCM_TRIGGER_STOP:
+- case SNDRV_PCM_TRIGGER_SUSPEND:
+- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+- ret = sdw_trigger(substream, cmd);
+- if (ret < 0)
+- break;
+-
+- ret = max_98373_trigger(substream, cmd);
+- break;
+- default:
+- ret = -EINVAL;
+- break;
++ /* set spk pin by playback only */
++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
++ return 0;
++
++ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ for_each_rtd_codec_dais(rtd, j, codec_dai) {
++ struct snd_soc_dapm_context *dapm =
++ snd_soc_component_get_dapm(cpu_dai->component);
++ char pin_name[16];
++
++ snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
++ codec_dai->component->name_prefix);
++
++ if (enable)
++ ret = snd_soc_dapm_enable_pin(dapm, pin_name);
++ else
++ ret = snd_soc_dapm_disable_pin(dapm, pin_name);
++
++ if (!ret)
++ snd_soc_dapm_sync(dapm);
+ }
+
+- return ret;
++ return 0;
++}
++
++static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
++{
++ int ret = 0;
++
++ /* according to soc_pcm_prepare dai link prepare is called first */
++ ret = sdw_prepare(substream);
++ if (ret < 0)
++ return ret;
++
++ return mx8373_enable_spk_pin(substream, true);
++}
++
++static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
++{
++ int ret = 0;
++
++ /* according to soc_pcm_hw_free dai link free is called first */
++ ret = sdw_hw_free(substream);
++ if (ret < 0)
++ return ret;
++
++ return mx8373_enable_spk_pin(substream, false);
+ }
+
+ static const struct snd_soc_ops max_98373_sdw_ops = {
+ .startup = sdw_startup,
+- .prepare = sdw_prepare,
+- .trigger = max98373_sdw_trigger,
+- .hw_free = sdw_hw_free,
++ .prepare = mx8373_sdw_prepare,
++ .trigger = sdw_trigger,
++ .hw_free = mx8373_sdw_hw_free,
+ .shutdown = sdw_shutdown,
+ };
+
+--
+2.30.2
+
--- /dev/null
+From 8f1c2b0aa2572f2b04b897b8a7a25f8b25c646a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 May 2021 11:36:57 -0500
+Subject: ASoC: Intel: boards: handle hda-dsp-common as a module
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+hda-dsp-common.o is linked multiple times due to copy/paste and
+inertia. Move to a dedicated module with a namespace.
+
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+Link: https://lore.kernel.org/r/20210505163705.305616-6-pierre-louis.bossart@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ sound/soc/intel/boards/Kconfig | 13 ++++++++++
+ sound/soc/intel/boards/Makefile | 24 +++++++++++--------
+ sound/soc/intel/boards/bxt_da7219_max98357a.c | 1 +
+ sound/soc/intel/boards/bxt_rt298.c | 1 +
+ sound/soc/intel/boards/cml_rt1011_rt5682.c | 1 +
+ sound/soc/intel/boards/ehl_rt5660.c | 1 +
+ sound/soc/intel/boards/glk_rt5682_max98357a.c | 1 +
+ sound/soc/intel/boards/hda_dsp_common.c | 5 ++++
+ sound/soc/intel/boards/skl_hda_dsp_generic.c | 1 +
+ sound/soc/intel/boards/sof_da7219_max98373.c | 1 +
+ sound/soc/intel/boards/sof_pcm512x.c | 1 +
+ sound/soc/intel/boards/sof_rt5682.c | 1 +
+ sound/soc/intel/boards/sof_sdw.c | 1 +
+ 13 files changed, 42 insertions(+), 10 deletions(-)
+
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index 58379393b8e4..ec4d754eb348 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -26,6 +26,9 @@ config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
+ interface.
+ If unsure select N.
+
++config SND_SOC_INTEL_HDA_DSP_COMMON
++ tristate
++
+ if SND_SOC_INTEL_CATPT
+
+ config SND_SOC_INTEL_HASWELL_MACH
+@@ -278,6 +281,7 @@ config SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
+ select SND_SOC_MAX98390
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+
+ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ tristate
+@@ -304,6 +308,7 @@ config SND_SOC_INTEL_BXT_RT298_MACH
+ select SND_SOC_RT298
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ This adds support for ASoC machine driver for Broxton platforms
+ with RT286 I2S audio codec.
+@@ -422,6 +427,7 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ This adds support for ASoC machine driver for Geminilake platforms
+ with RT5682 + MAX98357A I2S audio codec.
+@@ -437,6 +443,7 @@ config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
+ depends on SND_HDA_CODEC_HDMI
+ depends on GPIOLIB
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_DMIC
+ # SND_SOC_HDAC_HDA is already selected
+ help
+@@ -461,6 +468,7 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
+ select SND_SOC_RT5682_I2C
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ This adds support for ASoC machine driver for SOF platforms
+ with rt5682 codec.
+@@ -473,6 +481,7 @@ config SND_SOC_INTEL_SOF_PCM512x_MACH
+ depends on (SND_SOC_SOF_HDA_AUDIO_CODEC && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
+ (SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
+ depends on SND_HDA_CODEC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_PCM512x_I2C
+ help
+ This adds support for ASoC machine driver for SOF platforms
+@@ -504,6 +513,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
+ select SND_SOC_RT5682_I2C
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ This adds support for ASoC machine driver for SOF platform with
+ RT1011 + RT5682 I2S codec.
+@@ -519,6 +529,7 @@ config SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH
+ depends on I2C && ACPI && GPIOLIB
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98373_I2C
+ select SND_SOC_DMIC
+@@ -539,6 +550,7 @@ config SND_SOC_INTEL_EHL_RT5660_MACH
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
+ select SND_SOC_RT5660
+ select SND_SOC_DMIC
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ This adds support for ASoC machine driver for Elkhart Lake
+ platform with RT5660 I2S audio codec.
+@@ -566,6 +578,7 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
+ select SND_SOC_RT715_SDCA_SDW
+ select SND_SOC_RT5682_SDW
+ select SND_SOC_DMIC
++ select SND_SOC_INTEL_HDA_DSP_COMMON
+ help
+ Add support for Intel SoundWire-based platforms connected to
+ MAX98373, RT700, RT711, RT1308 and RT715
+diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
+index 616c5fbab7d5..a48ee9b74e73 100644
+--- a/sound/soc/intel/boards/Makefile
++++ b/sound/soc/intel/boards/Makefile
+@@ -3,11 +3,11 @@ snd-soc-sst-haswell-objs := haswell.o
+ snd-soc-sst-bdw-rt5650-mach-objs := bdw-rt5650.o
+ snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
+ snd-soc-sst-broadwell-objs := broadwell.o
+-snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
+-snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
+-snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o hda_dsp_common.o
++snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
++snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
++snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o
+ snd-soc-sst-sof-wm8804-objs := sof_wm8804.o
+-snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
++snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o
+ snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
+ snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
+ snd-soc-sst-bytcr-wm5102-objs := bytcr_wm5102.o
+@@ -19,19 +19,19 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
+ snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
+ snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
+ snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
+-snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o sof_maxim_common.o sof_realtek_common.o
+-snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o hda_dsp_common.o
++snd-soc-sof_rt5682-objs := sof_rt5682.o sof_maxim_common.o sof_realtek_common.o
++snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o
+ snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
+ snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
+ snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
+ snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
+ snd-soc-kbl_rt5660-objs := kbl_rt5660.o
+ snd-soc-skl_rt286-objs := skl_rt286.o
+-snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_common.o
++snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
+ snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
+ snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
+-snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o hda_dsp_common.o
+-snd-soc-ehl-rt5660-objs := ehl_rt5660.o hda_dsp_common.o
++snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o
++snd-soc-ehl-rt5660-objs := ehl_rt5660.o
+ snd-soc-sof-sdw-objs += sof_sdw.o \
+ sof_sdw_max98373.o \
+ sof_sdw_rt1308.o sof_sdw_rt1316.o \
+@@ -39,7 +39,7 @@ snd-soc-sof-sdw-objs += sof_sdw.o \
+ sof_sdw_rt711.o sof_sdw_rt711_sdca.o \
+ sof_sdw_rt715.o sof_sdw_rt715_sdca.o \
+ sof_maxim_common.o \
+- sof_sdw_dmic.o sof_sdw_hdmi.o hda_dsp_common.o
++ sof_sdw_dmic.o sof_sdw_hdmi.o
+ obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
+ obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
+ obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
+@@ -74,3 +74,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
+ obj-$(CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH) += snd-soc-sof_da7219_max98373.o
+ obj-$(CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH) += snd-soc-ehl-rt5660.o
+ obj-$(CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH) += snd-soc-sof-sdw.o
++
++# common modules
++snd-soc-intel-hda-dsp-common-objs := hda_dsp_common.o
++obj-$(CONFIG_SND_SOC_INTEL_HDA_DSP_COMMON) += snd-soc-intel-hda-dsp-common.o
+diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+index 9ffef396f8f2..07ae950b0127 100644
+--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+@@ -869,3 +869,4 @@ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:bxt_da7219_max98357a");
+ MODULE_ALIAS("platform:glk_da7219_max98357a");
+ MODULE_ALIAS("platform:cml_da7219_max98357a");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index 0f3157dfa838..32a776fa0b86 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Intel SST Audio for Broxton");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:bxt_alc298s_i2s");
+ MODULE_ALIAS("platform:glk_alc298s_i2s");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
+index 14813beb33d1..27615acddacd 100644
+--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
++++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
+@@ -594,3 +594,4 @@ MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
+ MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:cml_rt1011_rt5682");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/ehl_rt5660.c b/sound/soc/intel/boards/ehl_rt5660.c
+index 7c0d4e915406..b9b72d05b335 100644
+--- a/sound/soc/intel/boards/ehl_rt5660.c
++++ b/sound/soc/intel/boards/ehl_rt5660.c
+@@ -321,3 +321,4 @@ MODULE_DESCRIPTION("ASoC Intel(R) Elkhartlake + rt5660 Machine driver");
+ MODULE_AUTHOR("libin.yang@intel.com");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:ehl_rt5660");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+index 62cca511522e..19e2ff90886a 100644
+--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
++++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+@@ -642,3 +642,4 @@ MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+ MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:glk_rt5682_max98357a");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
+index 91ad2a0ad1ce..efdc4bc4bb1f 100644
+--- a/sound/soc/intel/boards/hda_dsp_common.c
++++ b/sound/soc/intel/boards/hda_dsp_common.c
+@@ -2,6 +2,7 @@
+ //
+ // Copyright(c) 2019 Intel Corporation. All rights reserved.
+
++#include <linux/module.h>
+ #include <sound/pcm.h>
+ #include <sound/soc.h>
+ #include <sound/hda_codec.h>
+@@ -82,5 +83,9 @@ int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+
+ return err;
+ }
++EXPORT_SYMBOL_NS(hda_dsp_hdmi_build_controls, SND_SOC_INTEL_HDA_DSP_COMMON);
+
+ #endif
++
++MODULE_DESCRIPTION("ASoC Intel HDMI helpers");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index bc50eda297ab..f4b4eeca3e03 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -258,3 +258,4 @@ MODULE_DESCRIPTION("SKL/KBL/BXT/APL HDA Generic Machine driver");
+ MODULE_AUTHOR("Rakesh Ughreja <rakesh.a.ughreja@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:skl_hda_dsp_generic");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
+index 8d1ad892e86b..2116d70d1ea8 100644
+--- a/sound/soc/intel/boards/sof_da7219_max98373.c
++++ b/sound/soc/intel/boards/sof_da7219_max98373.c
+@@ -458,3 +458,4 @@ MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:sof_da7219_max98360a");
+ MODULE_ALIAS("platform:sof_da7219_max98373");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
+index d2b0456236c7..8620d4f38493 100644
+--- a/sound/soc/intel/boards/sof_pcm512x.c
++++ b/sound/soc/intel/boards/sof_pcm512x.c
+@@ -437,3 +437,4 @@ MODULE_DESCRIPTION("ASoC Intel(R) SOF + PCM512x Machine driver");
+ MODULE_AUTHOR("Pierre-Louis Bossart");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:sof_pcm512x");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index cf1d053733e2..52401cc0de92 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -994,3 +994,4 @@ MODULE_ALIAS("platform:jsl_rt5682_max98360a");
+ MODULE_ALIAS("platform:cml_rt1015_rt5682");
+ MODULE_ALIAS("platform:tgl_rt1011_rt5682");
+ MODULE_ALIAS("platform:jsl_rt5682_rt1015p");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 5827a16773c9..5e4d26e9bb7d 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -1318,3 +1318,4 @@ MODULE_AUTHOR("Rander Wang <rander.wang@linux.intel.com>");
+ MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:sof_sdw");
++MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+--
+2.30.2
+
--- /dev/null
+From cabc03890071af9145b6dc87c19076fd1b17dc01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Jul 2021 09:04:59 -0700
+Subject: bpf, sockmap: On cleanup we additionally need to remove cached skb
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 476d98018f32e68e7c5d4e8456940cf2b6d66f10 ]
+
+Its possible if a socket is closed and the receive thread is under memory
+pressure it may have cached a skb. We need to ensure these skbs are
+free'd along with the normal ingress_skb queue.
+
+Before 799aa7f98d53 ("skmsg: Avoid lock_sock() in sk_psock_backlog()") tear
+down and backlog processing both had sock_lock for the common case of
+socket close or unhash. So it was not possible to have both running in
+parrallel so all we would need is the kfree in those kernels.
+
+But, latest kernels include the commit 799aa7f98d5e and this requires a
+bit more work. Without the ingress_lock guarding reading/writing the
+state->skb case its possible the tear down could run before the state
+update causing it to leak memory or worse when the backlog reads the state
+it could potentially run interleaved with the tear down and we might end up
+free'ing the state->skb from tear down side but already have the reference
+from backlog side. To resolve such races we wrap accesses in ingress_lock
+on both sides serializing tear down and backlog case. In both cases this
+only happens after an EAGAIN error case so having an extra lock in place
+is likely fine. The normal path will skip the locks.
+
+Note, we check state->skb before grabbing lock. This works because
+we can only enqueue with the mutex we hold already. Avoiding a race
+on adding state->skb after the check. And if tear down path is running
+that is also fine if the tear down path then removes state->skb we
+will simply set skb=NULL and the subsequent goto is skipped. This
+slight complication avoids locking in normal case.
+
+With this fix we no longer see this warning splat from tcp side on
+socket close when we hit the above case with redirect to ingress self.
+
+[224913.935822] WARNING: CPU: 3 PID: 32100 at net/core/stream.c:208 sk_stream_kill_queues+0x212/0x220
+[224913.935841] Modules linked in: fuse overlay bpf_preload x86_pkg_temp_thermal intel_uncore wmi_bmof squashfs sch_fq_codel efivarfs ip_tables x_tables uas xhci_pci ixgbe mdio xfrm_algo xhci_hcd wmi
+[224913.935897] CPU: 3 PID: 32100 Comm: fgs-bench Tainted: G I 5.14.0-rc1alu+ #181
+[224913.935908] Hardware name: Dell Inc. Precision 5820 Tower/002KVM, BIOS 1.9.2 01/24/2019
+[224913.935914] RIP: 0010:sk_stream_kill_queues+0x212/0x220
+[224913.935923] Code: 8b 83 20 02 00 00 85 c0 75 20 5b 5d 41 5c 41 5d 41 5e 41 5f c3 48 89 df e8 2b 11 fe ff eb c3 0f 0b e9 7c ff ff ff 0f 0b eb ce <0f> 0b 5b 5d 41 5c 41 5d 41 5e 41 5f c3 90 0f 1f 44 00 00 41 57 41
+[224913.935932] RSP: 0018:ffff88816271fd38 EFLAGS: 00010206
+[224913.935941] RAX: 0000000000000ae8 RBX: ffff88815acd5240 RCX: dffffc0000000000
+[224913.935948] RDX: 0000000000000003 RSI: 0000000000000ae8 RDI: ffff88815acd5460
+[224913.935954] RBP: ffff88815acd5460 R08: ffffffff955c0ae8 R09: fffffbfff2e6f543
+[224913.935961] R10: ffffffff9737aa17 R11: fffffbfff2e6f542 R12: ffff88815acd5390
+[224913.935967] R13: ffff88815acd5480 R14: ffffffff98d0c080 R15: ffffffff96267500
+[224913.935974] FS: 00007f86e6bd1700(0000) GS:ffff888451cc0000(0000) knlGS:0000000000000000
+[224913.935981] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[224913.935988] CR2: 000000c0008eb000 CR3: 00000001020e0005 CR4: 00000000003706e0
+[224913.935994] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[224913.936000] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[224913.936007] Call Trace:
+[224913.936016] inet_csk_destroy_sock+0xba/0x1f0
+[224913.936033] __tcp_close+0x620/0x790
+[224913.936047] tcp_close+0x20/0x80
+[224913.936056] inet_release+0x8f/0xf0
+[224913.936070] __sock_release+0x72/0x120
+[224913.936083] sock_close+0x14/0x20
+
+Fixes: a136678c0bdbb ("bpf: sk_msg, zap ingress queue on psock down")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20210727160500.1713554-3-john.fastabend@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 35 +++++++++++++++++++++++++++++------
+ 1 file changed, 29 insertions(+), 6 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index b088fe07fc00..7e7205e93258 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -613,23 +613,42 @@ static void sock_drop(struct sock *sk, struct sk_buff *skb)
+ kfree_skb(skb);
+ }
+
++static void sk_psock_skb_state(struct sk_psock *psock,
++ struct sk_psock_work_state *state,
++ struct sk_buff *skb,
++ int len, int off)
++{
++ spin_lock_bh(&psock->ingress_lock);
++ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
++ state->skb = skb;
++ state->len = len;
++ state->off = off;
++ } else {
++ sock_drop(psock->sk, skb);
++ }
++ spin_unlock_bh(&psock->ingress_lock);
++}
++
+ static void sk_psock_backlog(struct work_struct *work)
+ {
+ struct sk_psock *psock = container_of(work, struct sk_psock, work);
+ struct sk_psock_work_state *state = &psock->work_state;
+- struct sk_buff *skb;
++ struct sk_buff *skb = NULL;
+ bool ingress;
+ u32 len, off;
+ int ret;
+
+ mutex_lock(&psock->work_mutex);
+- if (state->skb) {
++ if (unlikely(state->skb)) {
++ spin_lock_bh(&psock->ingress_lock);
+ skb = state->skb;
+ len = state->len;
+ off = state->off;
+ state->skb = NULL;
+- goto start;
++ spin_unlock_bh(&psock->ingress_lock);
+ }
++ if (skb)
++ goto start;
+
+ while ((skb = skb_dequeue(&psock->ingress_skb))) {
+ len = skb->len;
+@@ -644,9 +663,8 @@ static void sk_psock_backlog(struct work_struct *work)
+ len, ingress);
+ if (ret <= 0) {
+ if (ret == -EAGAIN) {
+- state->skb = skb;
+- state->len = len;
+- state->off = off;
++ sk_psock_skb_state(psock, state, skb,
++ len, off);
+ goto end;
+ }
+ /* Hard errors break pipe and stop xmit. */
+@@ -745,6 +763,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
+ skb_bpf_redirect_clear(skb);
+ sock_drop(psock->sk, skb);
+ }
++ kfree_skb(psock->work_state.skb);
++ /* We null the skb here to ensure that calls to sk_psock_backlog
++ * do not pick up the free'd skb.
++ */
++ psock->work_state.skb = NULL;
+ __sk_psock_purge_ingress_msg(psock);
+ }
+
+--
+2.30.2
+
--- /dev/null
+From f377dfe0b6d6c9d0796c25e67384db825766011e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jul 2021 16:38:29 +1000
+Subject: cifs: add missing parsing of backupuid
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+[ Upstream commit b946dbcfa4df80ec81b442964e07ad37000cc059 ]
+
+We lost parsing of backupuid in the switch to new mount API.
+Add it back.
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Cc: <stable@vger.kernel.org> # v5.11+
+Reported-by: Xiaoli Feng <xifeng@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/fs_context.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 553adfbcc22a..72742eb1df4a 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -918,6 +918,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ ctx->cred_uid = uid;
+ ctx->cruid_specified = true;
+ break;
++ case Opt_backupuid:
++ uid = make_kuid(current_user_ns(), result.uint_32);
++ if (!uid_valid(uid))
++ goto cifs_parse_mount_err;
++ ctx->backupuid = uid;
++ ctx->backupuid_specified = true;
++ break;
+ case Opt_backupgid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+--
+2.30.2
+
--- /dev/null
+From 32b1d07c42a983b08137497859a8f6e64c8a5042 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jul 2021 09:24:16 +1000
+Subject: cifs: use helpers when parsing uid/gid mount options and validate
+ them
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+[ Upstream commit e0a3cbcd5cef00cace01546cc6eaaa3b31940da9 ]
+
+Use the nice helpers to initialize the uid/gid/cred_uid when they are passed as mount arguments.
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Acked-by: Pavel Shilovsky <pshilovsky@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/fs_context.c | 24 +++++++++++++++++++-----
+ fs/cifs/fs_context.h | 1 +
+ 2 files changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 92d4ab029c91..553adfbcc22a 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -322,7 +322,6 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
+ new_ctx->UNC = NULL;
+ new_ctx->source = NULL;
+ new_ctx->iocharset = NULL;
+-
+ /*
+ * Make sure to stay in sync with smb3_cleanup_fs_context_contents()
+ */
+@@ -792,6 +791,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ int i, opt;
+ bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
+ bool skip_parsing = false;
++ kuid_t uid;
++ kgid_t gid;
+
+ cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
+
+@@ -904,18 +905,31 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ }
+ break;
+ case Opt_uid:
+- ctx->linux_uid.val = result.uint_32;
++ uid = make_kuid(current_user_ns(), result.uint_32);
++ if (!uid_valid(uid))
++ goto cifs_parse_mount_err;
++ ctx->linux_uid = uid;
+ ctx->uid_specified = true;
+ break;
+ case Opt_cruid:
+- ctx->cred_uid.val = result.uint_32;
++ uid = make_kuid(current_user_ns(), result.uint_32);
++ if (!uid_valid(uid))
++ goto cifs_parse_mount_err;
++ ctx->cred_uid = uid;
++ ctx->cruid_specified = true;
+ break;
+ case Opt_backupgid:
+- ctx->backupgid.val = result.uint_32;
++ gid = make_kgid(current_user_ns(), result.uint_32);
++ if (!gid_valid(gid))
++ goto cifs_parse_mount_err;
++ ctx->backupgid = gid;
+ ctx->backupgid_specified = true;
+ break;
+ case Opt_gid:
+- ctx->linux_gid.val = result.uint_32;
++ gid = make_kgid(current_user_ns(), result.uint_32);
++ if (!gid_valid(gid))
++ goto cifs_parse_mount_err;
++ ctx->linux_gid = gid;
+ ctx->gid_specified = true;
+ break;
+ case Opt_port:
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index 2a71c8e411ac..b6243972edf3 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -155,6 +155,7 @@ enum cifs_param {
+
+ struct smb3_fs_context {
+ bool uid_specified;
++ bool cruid_specified;
+ bool gid_specified;
+ bool sloppy;
+ bool got_ip;
+--
+2.30.2
+
--- /dev/null
+From 33966a86eccc5fbe833820eeaa0f386ed972e122 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Aug 2021 13:48:18 -0500
+Subject: drm/i915: Revert "drm/i915/gem: Asynchronous cmdparser"
+
+From: Jason Ekstrand <jason@jlekstrand.net>
+
+commit c9d9fdbc108af8915d3f497bbdf3898bf8f321b8 upstream.
+
+This reverts 686c7c35abc2 ("drm/i915/gem: Asynchronous cmdparser"). The
+justification for this commit in the git history was a vague comment
+about getting it out from under the struct_mutex. While this may
+improve perf for some workloads on Gen7 platforms where we rely on the
+command parser for features such as indirect rendering, no numbers were
+provided to prove such an improvement. It claims to have closed two
+gitlab/bugzilla issues but with no explanation whatsoever as to why or
+what bug it's fixing.
+
+Meanwhile, by moving command parsing off to an async callback, it leaves
+us with a problem of what to do on error. When things were synchronous,
+EXECBUFFER2 would fail with an error code if parsing failed. When
+moving it to async, we needed another way to handle that error and the
+solution employed was to set an error on the dma_fence and then trust
+that said error gets propagated to the client eventually. Moving back
+to synchronous will help us untangle the fence error propagation mess.
+
+This also reverts most of 0edbb9ba1bfe ("drm/i915: Move cmd parser
+pinning to execbuffer") which is a refactor of some of our allocation
+paths for asynchronous parsing. Now that everything is synchronous, we
+don't need it.
+
+v2 (Daniel Vetter):
+ - Add stable Cc and Fixes tag
+
+Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
+Cc: <stable@vger.kernel.org> # v5.6+
+Fixes: 9e31c1fe45d5 ("drm/i915: Propagate errors on awaiting already signaled fences")
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210714193419.1459723-2-jason@jlekstrand.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 227 +-----------------
+ .../i915/gem/selftests/i915_gem_execbuffer.c | 4 +
+ drivers/gpu/drm/i915/i915_cmd_parser.c | 118 +++++----
+ drivers/gpu/drm/i915/i915_drv.h | 7 +-
+ 4 files changed, 91 insertions(+), 265 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 5964e67c7d36..305c320f9a83 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -25,10 +25,8 @@
+ #include "i915_gem_clflush.h"
+ #include "i915_gem_context.h"
+ #include "i915_gem_ioctls.h"
+-#include "i915_sw_fence_work.h"
+ #include "i915_trace.h"
+ #include "i915_user_extensions.h"
+-#include "i915_memcpy.h"
+
+ struct eb_vma {
+ struct i915_vma *vma;
+@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
+ int err;
+ struct intel_engine_cs *engine = eb->engine;
+
++ /* If we need to copy for the cmdparser, we will stall anyway */
++ if (eb_use_cmdparser(eb))
++ return ERR_PTR(-EWOULDBLOCK);
++
+ if (!reloc_can_use_engine(engine)) {
+ engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
+ if (!engine)
+@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
+ return vma;
+ }
+
+-struct eb_parse_work {
+- struct dma_fence_work base;
+- struct intel_engine_cs *engine;
+- struct i915_vma *batch;
+- struct i915_vma *shadow;
+- struct i915_vma *trampoline;
+- unsigned long batch_offset;
+- unsigned long batch_length;
+- unsigned long *jump_whitelist;
+- const void *batch_map;
+- void *shadow_map;
+-};
+-
+-static int __eb_parse(struct dma_fence_work *work)
+-{
+- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+- int ret;
+- bool cookie;
+-
+- cookie = dma_fence_begin_signalling();
+- ret = intel_engine_cmd_parser(pw->engine,
+- pw->batch,
+- pw->batch_offset,
+- pw->batch_length,
+- pw->shadow,
+- pw->jump_whitelist,
+- pw->shadow_map,
+- pw->batch_map);
+- dma_fence_end_signalling(cookie);
+-
+- return ret;
+-}
+-
+-static void __eb_parse_release(struct dma_fence_work *work)
+-{
+- struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+-
+- if (!IS_ERR_OR_NULL(pw->jump_whitelist))
+- kfree(pw->jump_whitelist);
+-
+- if (pw->batch_map)
+- i915_gem_object_unpin_map(pw->batch->obj);
+- else
+- i915_gem_object_unpin_pages(pw->batch->obj);
+-
+- i915_gem_object_unpin_map(pw->shadow->obj);
+-
+- if (pw->trampoline)
+- i915_active_release(&pw->trampoline->active);
+- i915_active_release(&pw->shadow->active);
+- i915_active_release(&pw->batch->active);
+-}
+-
+-static const struct dma_fence_work_ops eb_parse_ops = {
+- .name = "eb_parse",
+- .work = __eb_parse,
+- .release = __eb_parse_release,
+-};
+-
+-static inline int
+-__parser_mark_active(struct i915_vma *vma,
+- struct intel_timeline *tl,
+- struct dma_fence *fence)
+-{
+- struct intel_gt_buffer_pool_node *node = vma->private;
+-
+- return i915_active_ref(&node->active, tl->fence_context, fence);
+-}
+-
+-static int
+-parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
+-{
+- int err;
+-
+- mutex_lock(&tl->mutex);
+-
+- err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
+- if (err)
+- goto unlock;
+-
+- if (pw->trampoline) {
+- err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
+- if (err)
+- goto unlock;
+- }
+-
+-unlock:
+- mutex_unlock(&tl->mutex);
+- return err;
+-}
+-
+-static int eb_parse_pipeline(struct i915_execbuffer *eb,
+- struct i915_vma *shadow,
+- struct i915_vma *trampoline)
+-{
+- struct eb_parse_work *pw;
+- struct drm_i915_gem_object *batch = eb->batch->vma->obj;
+- bool needs_clflush;
+- int err;
+-
+- GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+- GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
+-
+- pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+- if (!pw)
+- return -ENOMEM;
+-
+- err = i915_active_acquire(&eb->batch->vma->active);
+- if (err)
+- goto err_free;
+-
+- err = i915_active_acquire(&shadow->active);
+- if (err)
+- goto err_batch;
+-
+- if (trampoline) {
+- err = i915_active_acquire(&trampoline->active);
+- if (err)
+- goto err_shadow;
+- }
+-
+- pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
+- if (IS_ERR(pw->shadow_map)) {
+- err = PTR_ERR(pw->shadow_map);
+- goto err_trampoline;
+- }
+-
+- needs_clflush =
+- !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+-
+- pw->batch_map = ERR_PTR(-ENODEV);
+- if (needs_clflush && i915_has_memcpy_from_wc())
+- pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
+-
+- if (IS_ERR(pw->batch_map)) {
+- err = i915_gem_object_pin_pages(batch);
+- if (err)
+- goto err_unmap_shadow;
+- pw->batch_map = NULL;
+- }
+-
+- pw->jump_whitelist =
+- intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
+- trampoline);
+- if (IS_ERR(pw->jump_whitelist)) {
+- err = PTR_ERR(pw->jump_whitelist);
+- goto err_unmap_batch;
+- }
+-
+- dma_fence_work_init(&pw->base, &eb_parse_ops);
+-
+- pw->engine = eb->engine;
+- pw->batch = eb->batch->vma;
+- pw->batch_offset = eb->batch_start_offset;
+- pw->batch_length = eb->batch_len;
+- pw->shadow = shadow;
+- pw->trampoline = trampoline;
+-
+- /* Mark active refs early for this worker, in case we get interrupted */
+- err = parser_mark_active(pw, eb->context->timeline);
+- if (err)
+- goto err_commit;
+-
+- err = dma_resv_reserve_shared(pw->batch->resv, 1);
+- if (err)
+- goto err_commit;
+-
+- err = dma_resv_reserve_shared(shadow->resv, 1);
+- if (err)
+- goto err_commit;
+-
+- /* Wait for all writes (and relocs) into the batch to complete */
+- err = i915_sw_fence_await_reservation(&pw->base.chain,
+- pw->batch->resv, NULL, false,
+- 0, I915_FENCE_GFP);
+- if (err < 0)
+- goto err_commit;
+-
+- /* Keep the batch alive and unwritten as we parse */
+- dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
+-
+- /* Force execution to wait for completion of the parser */
+- dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
+-
+- dma_fence_work_commit_imm(&pw->base);
+- return 0;
+-
+-err_commit:
+- i915_sw_fence_set_error_once(&pw->base.chain, err);
+- dma_fence_work_commit_imm(&pw->base);
+- return err;
+-
+-err_unmap_batch:
+- if (pw->batch_map)
+- i915_gem_object_unpin_map(batch);
+- else
+- i915_gem_object_unpin_pages(batch);
+-err_unmap_shadow:
+- i915_gem_object_unpin_map(shadow->obj);
+-err_trampoline:
+- if (trampoline)
+- i915_active_release(&trampoline->active);
+-err_shadow:
+- i915_active_release(&shadow->active);
+-err_batch:
+- i915_active_release(&eb->batch->vma->active);
+-err_free:
+- kfree(pw);
+- return err;
+-}
+-
+ static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
+ {
+ /*
+@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
+ goto err_trampoline;
+ }
+
+- err = eb_parse_pipeline(eb, shadow, trampoline);
++ err = dma_resv_reserve_shared(shadow->resv, 1);
++ if (err)
++ goto err_trampoline;
++
++ err = intel_engine_cmd_parser(eb->engine,
++ eb->batch->vma,
++ eb->batch_start_offset,
++ eb->batch_len,
++ shadow, trampoline);
+ if (err)
+ goto err_unpin_batch;
+
+diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+index 4df505e4c53a..16162fc2782d 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
+ intel_gt_pm_get(&eb.i915->gt);
+
+ for_each_uabi_engine(eb.engine, eb.i915) {
++ if (intel_engine_requires_cmd_parser(eb.engine) ||
++ intel_engine_using_cmd_parser(eb.engine))
++ continue;
++
+ reloc_cache_init(&eb.reloc_cache, eb.i915);
+ memset(map, POISON_INUSE, 4096);
+
+diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
+index e6f1e93abbbb..ce61ea4506ea 100644
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
+ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ struct drm_i915_gem_object *src_obj,
+ unsigned long offset, unsigned long length,
+- void *dst, const void *src)
++ bool *needs_clflush_after)
+ {
+- bool needs_clflush =
+- !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+-
+- if (src) {
+- GEM_BUG_ON(!needs_clflush);
+- i915_unaligned_memcpy_from_wc(dst, src + offset, length);
+- } else {
+- struct scatterlist *sg;
++ unsigned int src_needs_clflush;
++ unsigned int dst_needs_clflush;
++ void *dst, *src;
++ int ret;
++
++ ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
++ if (ret)
++ return ERR_PTR(ret);
++
++ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
++ i915_gem_object_finish_access(dst_obj);
++ if (IS_ERR(dst))
++ return dst;
++
++ ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
++ if (ret) {
++ i915_gem_object_unpin_map(dst_obj);
++ return ERR_PTR(ret);
++ }
++
++ src = ERR_PTR(-ENODEV);
++ if (src_needs_clflush && i915_has_memcpy_from_wc()) {
++ src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
++ if (!IS_ERR(src)) {
++ i915_unaligned_memcpy_from_wc(dst,
++ src + offset,
++ length);
++ i915_gem_object_unpin_map(src_obj);
++ }
++ }
++ if (IS_ERR(src)) {
++ unsigned long x, n, remain;
+ void *ptr;
+- unsigned int x, sg_ofs;
+- unsigned long remain;
+
+ /*
+ * We can avoid clflushing partial cachelines before the write
+@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ * validate up to the end of the batch.
+ */
+ remain = length;
+- if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
++ if (dst_needs_clflush & CLFLUSH_BEFORE)
+ remain = round_up(remain,
+ boot_cpu_data.x86_clflush_size);
+
+ ptr = dst;
+ x = offset_in_page(offset);
+- sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
+-
+- while (remain) {
+- unsigned long sg_max = sg->length >> PAGE_SHIFT;
+-
+- for (; remain && sg_ofs < sg_max; sg_ofs++) {
+- unsigned long len = min(remain, PAGE_SIZE - x);
+- void *map;
+-
+- map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
+- if (needs_clflush)
+- drm_clflush_virt_range(map + x, len);
+- memcpy(ptr, map + x, len);
+- kunmap_atomic(map);
+-
+- ptr += len;
+- remain -= len;
+- x = 0;
+- }
+-
+- sg_ofs = 0;
+- sg = sg_next(sg);
++ for (n = offset >> PAGE_SHIFT; remain; n++) {
++ int len = min(remain, PAGE_SIZE - x);
++
++ src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
++ if (src_needs_clflush)
++ drm_clflush_virt_range(src + x, len);
++ memcpy(ptr, src + x, len);
++ kunmap_atomic(src);
++
++ ptr += len;
++ remain -= len;
++ x = 0;
+ }
+ }
+
++ i915_gem_object_finish_access(src_obj);
++
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
+ /* dst_obj is returned with vmap pinned */
++ *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
++
+ return dst;
+ }
+
+@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+ if (target_cmd_index == offset)
+ return 0;
+
++ if (IS_ERR(jump_whitelist))
++ return PTR_ERR(jump_whitelist);
++
+ if (!test_bit(target_cmd_index, jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+@@ -1369,14 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+ return 0;
+ }
+
+-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
+- bool trampoline)
++static unsigned long *alloc_whitelist(u32 batch_length)
+ {
+ unsigned long *jmp;
+
+- if (trampoline)
+- return NULL;
+-
+ /*
+ * We expect batch_length to be less than 256KiB for known users,
+ * i.e. we need at most an 8KiB bitmap allocation which should be
+@@ -1409,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
+ * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+ * if the batch appears legal but should use hardware parsing
+ */
++
+ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ unsigned long batch_offset,
+ unsigned long batch_length,
+ struct i915_vma *shadow,
+- unsigned long *jump_whitelist,
+- void *shadow_map,
+- const void *batch_map)
++ bool trampoline)
+ {
+ u32 *cmd, *batch_end, offset = 0;
+ struct drm_i915_cmd_descriptor default_desc = noop_desc;
+ const struct drm_i915_cmd_descriptor *desc = &default_desc;
++ bool needs_clflush_after = false;
++ unsigned long *jump_whitelist;
+ u64 batch_addr, shadow_addr;
+ int ret = 0;
+- bool trampoline = !jump_whitelist;
+
+ GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
+ GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
+@@ -1431,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ batch->size));
+ GEM_BUG_ON(!batch_length);
+
+- cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
+- shadow_map, batch_map);
++ cmd = copy_batch(shadow->obj, batch->obj,
++ batch_offset, batch_length,
++ &needs_clflush_after);
++ if (IS_ERR(cmd)) {
++ DRM_DEBUG("CMD: Failed to copy batch\n");
++ return PTR_ERR(cmd);
++ }
++
++ jump_whitelist = NULL;
++ if (!trampoline)
++ /* Defer failure until attempted use */
++ jump_whitelist = alloc_whitelist(batch_length);
+
+ shadow_addr = gen8_canonical_addr(shadow->node.start);
+ batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+@@ -1533,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+ i915_gem_object_flush_map(shadow->obj);
+
++ if (!IS_ERR_OR_NULL(jump_whitelist))
++ kfree(jump_whitelist);
++ i915_gem_object_unpin_map(shadow->obj);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 69e43bf91a15..4c041e670904 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1881,17 +1881,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
+- bool trampoline);
+-
+ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ unsigned long batch_offset,
+ unsigned long batch_length,
+ struct i915_vma *shadow,
+- unsigned long *jump_whitelist,
+- void *shadow_map,
+- const void *batch_map);
++ bool trampoline);
+ #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
+
+ /* intel_device_info.c */
+--
+2.30.2
+
--- /dev/null
+From deb67810f9e616afb8461e9d983ec4dc8a3e448d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jul 2021 12:37:19 +0300
+Subject: net: dsa: sja1105: fix address learning getting disabled on the CPU
+ port
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit b0b33b048dcfbd7da82c3cde4fab02751dfab4d6 ]
+
+In May 2019 when commit 640f763f98c2 ("net: dsa: sja1105: Add support
+for Spanning Tree Protocol") was introduced, the comment that "STP does
+not get called for the CPU port" was true. This changed after commit
+0394a63acfe2 ("net: dsa: enable and disable all ports") in August 2019
+and went largely unnoticed, because the sja1105_bridge_stp_state_set()
+method did nothing different compared to the static setup done by
+sja1105_init_mac_settings().
+
+With the ability to turn address learning off introduced by the blamed
+commit, there is a new priv->learn_ena port mask in the driver. When
+sja1105_bridge_stp_state_set() gets called and we are in
+BR_STATE_LEARNING or later, address learning is enabled or not depending
+on priv->learn_ena & BIT(port).
+
+So what happens is that priv->learn_ena is not being set from anywhere
+for the CPU port, and the static configuration done by
+sja1105_init_mac_settings() is being overwritten.
+
+To solve this, acknowledge that the static configuration of STP state is
+no longer necessary because the STP state is being set by the DSA core
+now, but what is necessary is to set priv->learn_ena for the CPU port.
+
+Fixes: 4d9423549501 ("net: dsa: sja1105: offload bridge port flags to device")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/sja1105/sja1105_main.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index ebb6966eba8e..5b7947832b87 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -130,14 +130,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
+
+ for (i = 0; i < ds->num_ports; i++) {
+ mac[i] = default_mac;
+- if (i == dsa_upstream_port(priv->ds, i)) {
+- /* STP doesn't get called for CPU port, so we need to
+- * set the I/O parameters statically.
+- */
+- mac[i].dyn_learn = true;
+- mac[i].ingress = true;
+- mac[i].egress = true;
+- }
++
++ /* Let sja1105_bridge_stp_state_set() keep address learning
++ * enabled for the CPU port.
++ */
++ if (dsa_is_cpu_port(ds, i))
++ priv->learn_ena |= BIT(i);
+ }
+
+ return 0;
+--
+2.30.2
+
--- /dev/null
+From db62a3c527d6499ba901bd63e66d1c25862e36a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 May 2021 16:14:13 +0300
+Subject: net: dsa: sja1105: parameterize the number of ports
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 542043e91df452ed09f382d8c41cdf3788f31b5e ]
+
+The sja1105 driver will gain support for the next-gen SJA1110 switch,
+which is very similar except for the fact it has more than 5 ports.
+
+So we need to replace the hardcoded SJA1105_NUM_PORTS in this driver
+with ds->num_ports. This patch is as mechanical as possible (save for
+the fact that ds->num_ports is not an integer constant expression).
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/sja1105/sja1105_clocking.c | 3 +-
+ drivers/net/dsa/sja1105/sja1105_flower.c | 9 ++--
+ drivers/net/dsa/sja1105/sja1105_main.c | 61 +++++++++++++---------
+ drivers/net/dsa/sja1105/sja1105_spi.c | 4 +-
+ drivers/net/dsa/sja1105/sja1105_tas.c | 14 ++---
+ 5 files changed, 53 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
+index 2a9b8a6a5306..f54b4d03a002 100644
+--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
++++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
+@@ -721,9 +721,10 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+
+ int sja1105_clocking_setup(struct sja1105_private *priv)
+ {
++ struct dsa_switch *ds = priv->ds;
+ int port, rc;
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
+index 973761132fc3..77c54126b3fc 100644
+--- a/drivers/net/dsa/sja1105/sja1105_flower.c
++++ b/drivers/net/dsa/sja1105/sja1105_flower.c
+@@ -35,6 +35,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ {
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
++ struct dsa_switch *ds = priv->ds;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+@@ -59,7 +60,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+- if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
++ if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+@@ -72,7 +73,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
++ int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+@@ -435,7 +436,7 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
++ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+@@ -486,7 +487,7 @@ void sja1105_flower_setup(struct dsa_switch *ds)
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++)
++ for (port = 0; port < ds->num_ports; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+ }
+
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 6e5dbe9f3892..ebb6966eba8e 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -107,6 +107,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int i;
+
+@@ -118,16 +119,16 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
+ table->entry_count = 0;
+ }
+
+- table->entries = kcalloc(SJA1105_NUM_PORTS,
++ table->entries = kcalloc(ds->num_ports,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+- table->entry_count = SJA1105_NUM_PORTS;
++ table->entry_count = ds->num_ports;
+
+ mac = table->entries;
+
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ mac[i] = default_mac;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ /* STP doesn't get called for CPU port, so we need to
+@@ -162,6 +163,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
+ {
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int i;
+
+@@ -183,7 +185,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
+
+ mii = table->entries;
+
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+@@ -267,8 +269,6 @@ static int sja1105_init_static_fdb(struct sja1105_private *priv)
+
+ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+ {
+- struct sja1105_table *table;
+- u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+@@ -276,8 +276,6 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
+- .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
+- max_fdb_entries, max_fdb_entries, },
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* This selects between Independent VLAN Learning (IVL) and
+@@ -301,6 +299,15 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+ .owr_dyn = true,
+ .drpnolearn = true,
+ };
++ struct dsa_switch *ds = priv->ds;
++ struct sja1105_table *table;
++ u64 max_fdb_entries;
++ int port;
++
++ max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / ds->num_ports;
++
++ for (port = 0; port < ds->num_ports; port++)
++ default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+@@ -393,6 +400,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
+ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+ {
+ struct sja1105_l2_forwarding_entry *l2fwd;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int i, j;
+
+@@ -413,7 +421,7 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+ l2fwd = table->entries;
+
+ /* First 5 entries define the forwarding rules */
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, i);
+
+ for (j = 0; j < SJA1105_NUM_TC; j++)
+@@ -441,8 +449,8 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+ * Create a one-to-one mapping.
+ */
+ for (i = 0; i < SJA1105_NUM_TC; i++)
+- for (j = 0; j < SJA1105_NUM_PORTS; j++)
+- l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
++ for (j = 0; j < ds->num_ports; j++)
++ l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
+
+ return 0;
+ }
+@@ -538,7 +546,7 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
+ */
+ .host_port = dsa_upstream_port(priv->ds, 0),
+ /* Default to an invalid value */
+- .mirr_port = SJA1105_NUM_PORTS,
++ .mirr_port = priv->ds->num_ports,
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address (presumably because it
+@@ -546,7 +554,7 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
+ * that). Default to an invalid port (to disable the feature)
+ * and overwrite this if we find any DSA (cascaded) ports.
+ */
+- .casc_port = SJA1105_NUM_PORTS,
++ .casc_port = priv->ds->num_ports,
+ /* No TTEthernet */
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
+ .vlmarker = 0,
+@@ -667,6 +675,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
+ static int sja1105_init_l2_policing(struct sja1105_private *priv)
+ {
+ struct sja1105_l2_policing_entry *policing;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+@@ -688,8 +697,8 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
+ policing = table->entries;
+
+ /* Setup shared indices for the matchall policers */
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
++ for (port = 0; port < ds->num_ports; port++) {
++ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
+@@ -698,7 +707,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
+ }
+
+ /* Setup the matchall policer parameters */
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(priv->ds, port))
+@@ -764,9 +773,10 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
+ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
+ const struct sja1105_dt_port *ports)
+ {
++ struct dsa_switch *ds = priv->ds;
+ int i;
+
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (ports[i].role == XMII_MAC)
+ continue;
+
+@@ -1641,7 +1651,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ /* Add this port to the forwarding matrix of the
+ * other ports in the same bridge, and viceversa.
+ */
+@@ -1863,7 +1873,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ * switch wants to see in the static config in order to allow us to
+ * change it through the dynamic interface later.
+ */
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ speed_mbps[i] = sja1105_speed[mac[i].speed];
+ mac[i].speed = SJA1105_SPEED_AUTO;
+ }
+@@ -1915,7 +1925,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ if (rc < 0)
+ goto out;
+
+- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ if (rc < 0)
+ goto out;
+@@ -3055,7 +3065,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
+ struct sja1105_bridge_vlan *v, *n;
+ int port;
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!dsa_is_user_port(ds, port))
+@@ -3258,6 +3268,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ {
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
+@@ -3268,7 +3279,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+- already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
++ already_enabled = (general_params->mirr_port != ds->num_ports);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+@@ -3282,7 +3293,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ int port;
+
+ /* Anybody still referencing mirr_port? */
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+@@ -3290,7 +3301,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+- new_mirr_port = SJA1105_NUM_PORTS;
++ new_mirr_port = ds->num_ports;
+ }
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
+@@ -3686,7 +3697,7 @@ static int sja1105_probe(struct spi_device *spi)
+ }
+
+ /* Connections between dsa_port and sja1105_port */
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ struct sja1105_port *sp = &priv->ports[port];
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *slave;
+diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
+index f7a1514f81e8..923d617cbec6 100644
+--- a/drivers/net/dsa/sja1105/sja1105_spi.c
++++ b/drivers/net/dsa/sja1105/sja1105_spi.c
+@@ -339,10 +339,10 @@ int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+
+ int sja1105_static_config_upload(struct sja1105_private *priv)
+ {
+- unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_status status;
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+@@ -363,7 +363,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
+ * Tx on all ports and waiting for current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+- rc = sja1105_inhibit_tx(priv, port_bitmap, true);
++ rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
+index 31d8acff1f01..e6153848a950 100644
+--- a/drivers/net/dsa/sja1105/sja1105_tas.c
++++ b/drivers/net/dsa/sja1105/sja1105_tas.c
+@@ -27,7 +27,7 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+
+ tas_data->enabled = false;
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+@@ -164,6 +164,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct sja1105_schedule_entry *schedule;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int schedule_start_idx;
+ s64 entry_point_delta;
+@@ -207,7 +208,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
+ }
+
+ /* Figure out the dimensioning of the problem */
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ if (tas_data->offload[port]) {
+ num_entries += tas_data->offload[port]->num_entries;
+ num_cycles++;
+@@ -269,7 +270,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
+ schedule_entry_points_params->actsubsch = num_cycles - 1;
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
+@@ -468,6 +469,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
++ struct dsa_switch *ds = priv->ds;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+@@ -491,7 +493,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+@@ -554,7 +556,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ }
+ }
+
+- for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
++ for (other_port = 0; other_port < ds->num_ports; other_port++) {
+ if (other_port == port)
+ continue;
+
+@@ -885,7 +887,7 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
+
+ cancel_work_sync(&priv->tas_data.tas_work);
+
+- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
++ for (port = 0; port < ds->num_ports; port++) {
+ offload = priv->tas_data.offload[port];
+ if (!offload)
+ continue;
+--
+2.30.2
+
--- /dev/null
+From db0b6012550dfb26577cbdd9a2944ecf9d26084f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 May 2021 00:50:40 +0200
+Subject: power: supply: ab8500: Call battery population once
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit 7e2bb83c617f8fccc04db7d03f105a06b9d491a9 ]
+
+The code was calling ab8500_bm_of_probe() in four different
+spots effectively overwriting the same configuration three
+times. This was done because probe order was uncertain.
+
+Since we now used componentized probe, call it only once
+while probing the main charging component.
+
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/ab8500_btemp.c | 7 -------
+ drivers/power/supply/ab8500_fg.c | 6 ------
+ drivers/power/supply/abx500_chargalg.c | 7 -------
+ 3 files changed, 20 deletions(-)
+
+diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
+index 4df305c767c5..dbdcff32f353 100644
+--- a/drivers/power/supply/ab8500_btemp.c
++++ b/drivers/power/supply/ab8500_btemp.c
+@@ -983,7 +983,6 @@ static const struct component_ops ab8500_btemp_component_ops = {
+
+ static int ab8500_btemp_probe(struct platform_device *pdev)
+ {
+- struct device_node *np = pdev->dev.of_node;
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = &pdev->dev;
+ struct ab8500_btemp *di;
+@@ -996,12 +995,6 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
+
+ di->bm = &ab8500_bm_data;
+
+- ret = ab8500_bm_of_probe(dev, np, di->bm);
+- if (ret) {
+- dev_err(dev, "failed to get battery information\n");
+- return ret;
+- }
+-
+ /* get parent data */
+ di->dev = dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
+index 46c718e9ebb7..146a5f03818f 100644
+--- a/drivers/power/supply/ab8500_fg.c
++++ b/drivers/power/supply/ab8500_fg.c
+@@ -3058,12 +3058,6 @@ static int ab8500_fg_probe(struct platform_device *pdev)
+
+ di->bm = &ab8500_bm_data;
+
+- ret = ab8500_bm_of_probe(dev, np, di->bm);
+- if (ret) {
+- dev_err(dev, "failed to get battery information\n");
+- return ret;
+- }
+-
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
+index 599684ce0e4b..a17849bfacbf 100644
+--- a/drivers/power/supply/abx500_chargalg.c
++++ b/drivers/power/supply/abx500_chargalg.c
+@@ -2002,7 +2002,6 @@ static const struct component_ops abx500_chargalg_component_ops = {
+ static int abx500_chargalg_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct device_node *np = dev->of_node;
+ struct power_supply_config psy_cfg = {};
+ struct abx500_chargalg *di;
+ int ret = 0;
+@@ -2013,12 +2012,6 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
+
+ di->bm = &ab8500_bm_data;
+
+- ret = ab8500_bm_of_probe(dev, np, di->bm);
+- if (ret) {
+- dev_err(dev, "failed to get battery information\n");
+- return ret;
+- }
+-
+ /* get device struct and parent */
+ di->dev = dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+--
+2.30.2
+
--- /dev/null
+From 57f21983f4318f6ebb8735e8aaabb16a231c6f36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Aug 2021 13:48:19 -0500
+Subject: Revert "drm/i915: Propagate errors on awaiting already signaled
+ fences"
+
+From: Jason Ekstrand <jason@jlekstrand.net>
+
+commit 3761baae908a7b5012be08d70fa553cc2eb82305 upstream.
+
+This reverts commit 9e31c1fe45d555a948ff66f1f0e3fe1f83ca63f7. Ever
+since that commit, we've been having issues where a hang in one client
+can propagate to another. In particular, a hang in an app can propagate
+to the X server which causes the whole desktop to lock up.
+
+Error propagation along fences sounds like a good idea, but as your bug
+shows, it has surprising consequences, since propagating errors across
+security boundaries is not a good thing.
+
+What we do have is tracking of hangs on the ctx, and reporting of
+information to userspace using RESET_STATS. That's how arb_robustness works. Also, if my
+understanding is still correct, the EIO from execbuf is when your context
+is banned (because not recoverable or too many hangs). And in all these
+cases it's up to userspace to figure out what is all impacted and should
+be reported to the application, that's not on the kernel to guess and
+automatically propagate.
+
+What's more, we're also building more features on top of ctx error
+reporting with RESET_STATS ioctl: Encrypted buffers use the same, and the
+userspace fence wait also relies on that mechanism. So it is the path
+going forward for reporting gpu hangs and resets to userspace.
+
+So all together that's why I think we should just bury this idea again as
+not quite the direction we want to go to, hence why I think the revert is
+the right option here.
+
+For backporters: Please note that you _must_ have a backport of
+https://lore.kernel.org/dri-devel/20210602164149.391653-2-jason@jlekstrand.net/
+for otherwise backporting just this patch opens up a security bug.
+
+v2: Augment commit message. Also restore Jason's sob that I
+accidentally lost.
+
+v3: Add a note for backporters
+
+Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
+Reported-by: Marcin Slusarz <marcin.slusarz@intel.com>
+Cc: <stable@vger.kernel.org> # v5.6+
+Cc: Jason Ekstrand <jason.ekstrand@intel.com>
+Cc: Marcin Slusarz <marcin.slusarz@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/3080
+Fixes: 9e31c1fe45d5 ("drm/i915: Propagate errors on awaiting already signaled fences")
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210714193419.1459723-3-jason@jlekstrand.net
+(cherry picked from commit 93a2711cddd5760e2f0f901817d71c93183c3b87)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_request.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index bec9c3652188..59d48a6a83d2 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
+
+ do {
+ fence = *child++;
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+- i915_sw_fence_set_error_once(&rq->submit, fence->error);
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+- }
+
+ if (fence->context == rq->fence.context)
+ continue;
+@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
+
+ do {
+ fence = *child++;
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+- i915_sw_fence_set_error_once(&rq->submit, fence->error);
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+- }
+
+ /*
+ * Requests on the same timeline are explicitly ordered, along
+--
+2.30.2
+
can-j1939-j1939_session_deactivate-clarify-lifetime-of-session-object.patch
perf-pmu-fix-alias-matching.patch
octeontx2-af-remove-unnecessary-devm_kfree.patch
+drm-i915-revert-drm-i915-gem-asynchronous-cmdparser.patch
+revert-drm-i915-propagate-errors-on-awaiting-already.patch
+power-supply-ab8500-call-battery-population-once.patch
+skmsg-increase-sk-sk_drops-when-dropping-packets.patch
+skmsg-pass-source-psock-to-sk_psock_skb_redirect.patch
+bpf-sockmap-on-cleanup-we-additionally-need-to-remov.patch
+cifs-use-helpers-when-parsing-uid-gid-mount-options-.patch
+cifs-add-missing-parsing-of-backupuid.patch
+net-dsa-sja1105-parameterize-the-number-of-ports.patch
+net-dsa-sja1105-fix-address-learning-getting-disable.patch
+asoc-intel-boards-handle-hda-dsp-common-as-a-module.patch
+asoc-intel-boards-create-sof-maxim-common-module.patch
+asoc-intel-boards-fix-xrun-issue-on-platform-with-ma.patch
--- /dev/null
+From b0479a4e36706349ce550ef7860e65e2647837e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jun 2021 19:13:42 -0700
+Subject: skmsg: Increase sk->sk_drops when dropping packets
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit 781dd0431eb549f9cb1fdddf91a50d985febe884 ]
+
+It is hard to observe packet drops without increasing relevant
+drop counters; here we should increase sk->sk_drops, which is
+a protocol-independent counter. Fortunately psock is always
+associated with a struct sock, we can just use psock->sk.
+
+Suggested-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
+Link: https://lore.kernel.org/bpf/20210615021342.7416-9-xiyou.wangcong@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 45b3a3adc886..d428368a0d87 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -607,6 +607,12 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ return sk_psock_skb_ingress(psock, skb);
+ }
+
++static void sock_drop(struct sock *sk, struct sk_buff *skb)
++{
++ sk_drops_add(sk, skb);
++ kfree_skb(skb);
++}
++
+ static void sk_psock_backlog(struct work_struct *work)
+ {
+ struct sk_psock *psock = container_of(work, struct sk_psock, work);
+@@ -646,7 +652,7 @@ static void sk_psock_backlog(struct work_struct *work)
+ /* Hard errors break pipe and stop xmit. */
+ sk_psock_report_error(psock, ret ? -ret : EPIPE);
+ sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+- kfree_skb(skb);
++ sock_drop(psock->sk, skb);
+ goto end;
+ }
+ off += ret;
+@@ -737,7 +743,7 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
+
+ while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
+ skb_bpf_redirect_clear(skb);
+- kfree_skb(skb);
++ sock_drop(psock->sk, skb);
+ }
+ __sk_psock_purge_ingress_msg(psock);
+ }
+@@ -863,7 +869,7 @@ static int sk_psock_skb_redirect(struct sk_buff *skb)
+ * return code, but then didn't set a redirect interface.
+ */
+ if (unlikely(!sk_other)) {
+- kfree_skb(skb);
++ sock_drop(from->sk, skb);
+ return -EIO;
+ }
+ psock_other = sk_psock(sk_other);
+@@ -873,14 +879,14 @@ static int sk_psock_skb_redirect(struct sk_buff *skb)
+ */
+ if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
+ skb_bpf_redirect_clear(skb);
+- kfree_skb(skb);
++ sock_drop(from->sk, skb);
+ return -EIO;
+ }
+ spin_lock_bh(&psock_other->ingress_lock);
+ if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
+ spin_unlock_bh(&psock_other->ingress_lock);
+ skb_bpf_redirect_clear(skb);
+- kfree_skb(skb);
++ sock_drop(from->sk, skb);
+ return -EIO;
+ }
+
+@@ -970,7 +976,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ case __SK_DROP:
+ default:
+ out_free:
+- kfree_skb(skb);
++ sock_drop(psock->sk, skb);
+ }
+
+ return err;
+@@ -1005,7 +1011,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
+ sk = strp->sk;
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+- kfree_skb(skb);
++ sock_drop(sk, skb);
+ goto out;
+ }
+ prog = READ_ONCE(psock->progs.stream_verdict);
+@@ -1126,7 +1132,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ len = 0;
+- kfree_skb(skb);
++ sock_drop(sk, skb);
+ goto out;
+ }
+ prog = READ_ONCE(psock->progs.stream_verdict);
+--
+2.30.2
+
--- /dev/null
+From c4f874f4c8df1a274631594cf889cc5444c29d91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jun 2021 19:13:41 -0700
+Subject: skmsg: Pass source psock to sk_psock_skb_redirect()
+
+From: Cong Wang <cong.wang@bytedance.com>
+
+[ Upstream commit 42830571f1fd9751b3fbf38084bbb253320e185f ]
+
+sk_psock_skb_redirect() only takes skb as a parameter; we
+will need to know where this skb is from, so just pass
+the source psock to this function as a new parameter.
+This patch prepares for the next one.
+
+Signed-off-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
+Link: https://lore.kernel.org/bpf/20210615021342.7416-8-xiyou.wangcong@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index d428368a0d87..b088fe07fc00 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -859,7 +859,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ }
+ EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
+
+-static int sk_psock_skb_redirect(struct sk_buff *skb)
++static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
+ {
+ struct sk_psock *psock_other;
+ struct sock *sk_other;
+@@ -896,11 +896,12 @@ static int sk_psock_skb_redirect(struct sk_buff *skb)
+ return 0;
+ }
+
+-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
++static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
++ struct sk_psock *from, int verdict)
+ {
+ switch (verdict) {
+ case __SK_REDIRECT:
+- sk_psock_skb_redirect(skb);
++ sk_psock_skb_redirect(from, skb);
+ break;
+ case __SK_PASS:
+ case __SK_DROP:
+@@ -924,7 +925,7 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
+ ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
+ skb->sk = NULL;
+ }
+- sk_psock_tls_verdict_apply(skb, psock->sk, ret);
++ sk_psock_tls_verdict_apply(skb, psock, ret);
+ rcu_read_unlock();
+ return ret;
+ }
+@@ -971,7 +972,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ }
+ break;
+ case __SK_REDIRECT:
+- err = sk_psock_skb_redirect(skb);
++ err = sk_psock_skb_redirect(psock, skb);
+ break;
+ case __SK_DROP:
+ default:
+--
+2.30.2
+