--- /dev/null
+From d94b93a9101573eb75b819dee94b1417acff631b Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 30 Dec 2020 16:54:56 +0100
+Subject: ARM: cpuidle: Avoid orphan section warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit d94b93a9101573eb75b819dee94b1417acff631b upstream.
+
+Since commit 83109d5d5fba ("x86/build: Warn on orphan section placement"),
+we get a warning for objects in orphan sections. The cpuidle implementation
+for OMAP causes this when CONFIG_CPU_IDLE is disabled:
+
+arm-linux-gnueabi-ld: warning: orphan section `__cpuidle_method_of_table' from `arch/arm/mach-omap2/pm33xx-core.o' being placed in section `__cpuidle_method_of_table'
+arm-linux-gnueabi-ld: warning: orphan section `__cpuidle_method_of_table' from `arch/arm/mach-omap2/pm33xx-core.o' being placed in section `__cpuidle_method_of_table'
+arm-linux-gnueabi-ld: warning: orphan section `__cpuidle_method_of_table' from `arch/arm/mach-omap2/pm33xx-core.o' being placed in section `__cpuidle_method_of_table'
+
+Change the definition of CPUIDLE_METHOD_OF_DECLARE() to silently
+drop the table and all code referenced from it when CONFIG_CPU_IDLE
+is disabled.
+
+Fixes: 06ee7a950b6a ("ARM: OMAP2+: pm33xx-core: Add cpuidle_ops for am335x/am437x")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Miguel Ojeda <ojeda@kernel.org>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20201230155506.1085689-1-arnd@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/cpuidle.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -7,9 +7,11 @@
+ #ifdef CONFIG_CPU_IDLE
+ extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
++#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
+ #else
+ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index) { return -ENODEV; }
++#define __cpuidle_method_section __maybe_unused /* drop silently */
+ #endif
+
+ /* Common ARM WFI state */
+@@ -42,8 +44,7 @@ struct of_cpuidle_method {
+
+ #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+- __used __section("__cpuidle_method_of_table") \
+- = { .method = _method, .ops = _ops }
++ __cpuidle_method_section = { .method = _method, .ops = _ops }
+
+ extern int arm_cpuidle_suspend(int index);
+
--- /dev/null
+From 41daf6ba594d55f201c50280ebcd430590441da1 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Mon, 24 May 2021 10:49:41 +0800
+Subject: ASoC: core: Fix Null-point-dereference in fmt_single_name()
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit 41daf6ba594d55f201c50280ebcd430590441da1 upstream.
+
+Check the return value of devm_kstrdup() in case of
+Null-point-dereference.
+
+Fixes: 45dd9943fce0 ("ASoC: core: remove artificial component and DAI name constraint")
+Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Link: https://lore.kernel.org/r/20210524024941.159952-1-wangkefeng.wang@huawei.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/soc-core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2231,6 +2231,8 @@ static char *fmt_single_name(struct devi
+ return NULL;
+
+ name = devm_kstrdup(dev, devname, GFP_KERNEL);
++ if (!name)
++ return NULL;
+
+ /* are we a "%s.%d" name (platform and SPI components) */
+ found = strstr(name, dev->driver->name);
--- /dev/null
+From d031d99b02eaf7363c33f5b27b38086cc8104082 Mon Sep 17 00:00:00 2001
+From: Jerome Brunet <jbrunet@baylibre.com>
+Date: Mon, 24 May 2021 11:34:48 +0200
+Subject: ASoC: meson: gx-card: fix sound-dai dt schema
+
+From: Jerome Brunet <jbrunet@baylibre.com>
+
+commit d031d99b02eaf7363c33f5b27b38086cc8104082 upstream.
+
+There is a fair amount of warnings when running 'make dtbs_check' with
+amlogic,gx-sound-card.yaml.
+
+Ex:
+arch/arm64/boot/dts/amlogic/meson-gxm-q200.dt.yaml: sound: dai-link-0:sound-dai:0:1: missing phandle tag in 0
+arch/arm64/boot/dts/amlogic/meson-gxm-q200.dt.yaml: sound: dai-link-0:sound-dai:0:2: missing phandle tag in 0
+arch/arm64/boot/dts/amlogic/meson-gxm-q200.dt.yaml: sound: dai-link-0:sound-dai:0: [66, 0, 0] is too long
+
+The reason is that the sound-dai phandle provided has cells, and in such
+case the schema should use 'phandle-array' instead of 'phandle'.
+
+Fixes: fd00366b8e41 ("ASoC: meson: gx: add sound card dt-binding documentation")
+Signed-off-by: Jerome Brunet <jbrunet@baylibre.com>
+Link: https://lore.kernel.org/r/20210524093448.357140-1-jbrunet@baylibre.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
++++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+@@ -57,7 +57,7 @@ patternProperties:
+ rate
+
+ sound-dai:
+- $ref: /schemas/types.yaml#/definitions/phandle
++ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: phandle of the CPU DAI
+
+ patternProperties:
+@@ -71,7 +71,7 @@ patternProperties:
+
+ properties:
+ sound-dai:
+- $ref: /schemas/types.yaml#/definitions/phandle
++ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: phandle of the codec DAI
+
+ required:
--- /dev/null
+From c8a570443943304cac2e4186dbce6989b6c2b8b5 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Tue, 8 Jun 2021 23:33:18 +0200
+Subject: drm/mcde: Fix off by 10^3 in calculation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit c8a570443943304cac2e4186dbce6989b6c2b8b5 upstream.
+
+The calclulation of how many bytes we stuff into the
+DSI pipeline for video mode panels is off by three
+orders of magnitude because we did not account for the
+fact that the DRM mode clock is in kilohertz rather
+than hertz.
+
+This used to be:
+drm_mode_vrefresh(mode) * mode->htotal * mode->vtotal
+which would become for example for s6e63m0:
+60 x 514 x 831 = 25628040 Hz, but mode->clock is
+25628 as it is in kHz.
+
+This affects only the Samsung GT-I8190 "Golden" phone
+right now since it is the only MCDE device with a video
+mode display.
+
+Curiously some specimen work with this code and wild
+settings in the EOL and empty packets at the end of the
+display, but I have noticed an eeire flicker until now.
+Others were not so lucky and got black screens.
+
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reported-by: Stephan Gerhold <stephan@gerhold.net>
+Fixes: 920dd1b1425b ("drm/mcde: Use mode->clock instead of reverse calculating it from the vrefresh")
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Tested-by: Stephan Gerhold <stephan@gerhold.net>
+Reviewed-by: Stephan Gerhold <stephan@gerhold.net>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210608213318.3897858-1-linus.walleij@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/mcde/mcde_dsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/mcde/mcde_dsi.c
++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
+@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(st
+ * porches and sync.
+ */
+ /* (ps/s) / (pixels/s) = ps/pixels */
+- pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
++ pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
+ dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
+ pclk);
+
--- /dev/null
+From ce86c239e4d218ae6040bec18e6d19a58edb8b7c Mon Sep 17 00:00:00 2001
+From: Jonathan Marek <jonathan@marek.ca>
+Date: Thu, 13 May 2021 13:14:00 -0400
+Subject: drm/msm/a6xx: avoid shadow NULL reference in failure path
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+commit ce86c239e4d218ae6040bec18e6d19a58edb8b7c upstream.
+
+If a6xx_hw_init() fails before creating the shadow_bo, the a6xx_pm_suspend
+code referencing it will crash. Change the condition to one that avoids
+this problem (note: creation of shadow_bo is behind this same condition)
+
+Fixes: e8b0b994c3a5 ("drm/msm/a6xx: Clear shadow on suspend")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org>
+Link: https://lore.kernel.org/r/20210513171431.18632-6-jonathan@marek.ca
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1128,7 +1128,7 @@ static int a6xx_pm_suspend(struct msm_gp
+ if (ret)
+ return ret;
+
+- if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
++ if (a6xx_gpu->shadow_bo)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
--- /dev/null
+From b4387eaf3821a4c4241ac3a556e13244eb1fdaa5 Mon Sep 17 00:00:00 2001
+From: Jonathan Marek <jonathan@marek.ca>
+Date: Thu, 13 May 2021 13:13:58 -0400
+Subject: drm/msm/a6xx: fix incorrectly set uavflagprd_inv field for A650
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+commit b4387eaf3821a4c4241ac3a556e13244eb1fdaa5 upstream.
+
+Value was shifted in the wrong direction, resulting in the field always
+being zero, which is incorrect for A650.
+
+Fixes: d0bac4e9cd66 ("drm/msm/a6xx: set ubwc config for A640 and A650")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org>
+Link: https://lore.kernel.org/r/20210513171431.18632-4-jonathan@marek.ca
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -486,7 +486,7 @@ static void a6xx_set_ubwc_config(struct
+ rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+- uavflagprd_inv >> 4 | lower_bit << 1);
++ uavflagprd_inv << 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
+ }
+
--- /dev/null
+From 408434036958699a7f50ddec984f7ba33e11a8f5 Mon Sep 17 00:00:00 2001
+From: Jonathan Marek <jonathan@marek.ca>
+Date: Thu, 13 May 2021 13:13:59 -0400
+Subject: drm/msm/a6xx: update/fix CP_PROTECT initialization
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+commit 408434036958699a7f50ddec984f7ba33e11a8f5 upstream.
+
+Update CP_PROTECT register programming based on downstream.
+
+A6XX_PROTECT_RW is renamed to A6XX_PROTECT_NORDWR to make things aligned
+and also be more clear about what it does.
+
+Note that this required switching to use the CP_ALWAYS_ON_COUNTER as the
+GMU counter is not accessible from the cmdstream. Which also means
+using the CPU counter for the msm_gpu_submit_flush() tracepoint (as
+catapult depends on being able to compare this to the start/end values
+captured in cmdstream). This may need to be revisited when IFPC is
+enabled.
+
+Also, compared to downstream, this opens up CP_PERFCTR_CP_SEL as the
+userspace performance tooling (fdperf and pps-producer) expect to be
+able to configure the CP counters.
+
+Fixes: 4b565ca5a2cb ("drm/msm: Add A6XX device support")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org>
+Link: https://lore.kernel.org/r/20210513171431.18632-5-jonathan@marek.ca
+[switch to CP_ALWAYS_ON_COUNTER, open up CP_PERFCNTR_CP_SEL, and spiff
+ up commit msg]
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 151 +++++++++++++++++++++++++---------
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 2
+ 2 files changed, 113 insertions(+), 40 deletions(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -154,7 +154,7 @@ static void a6xx_submit(struct msm_gpu *
+ * GPU registers so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
++ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ rbmemptr_stats(ring, index, alwayson_start));
+
+ /* Invalidate CCU depth and color */
+@@ -184,7 +184,7 @@ static void a6xx_submit(struct msm_gpu *
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_end));
+- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
++ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ rbmemptr_stats(ring, index, alwayson_end));
+
+ /* Write the fence to the scratch register */
+@@ -203,8 +203,8 @@ static void a6xx_submit(struct msm_gpu *
+ OUT_RING(ring, submit->seqno);
+
+ trace_msm_gpu_submit_flush(submit,
+- gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+- REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
++ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
++ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
+
+ a6xx_flush(gpu, ring);
+ }
+@@ -459,6 +459,113 @@ static void a6xx_set_hwcg(struct msm_gpu
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+ }
+
++/* For a615, a616, a618, A619, a630, a640 and a680 */
++static const u32 a6xx_protect[] = {
++ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
++ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
++ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
++ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
++ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
++ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
++ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
++ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
++ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
++ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
++ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
++ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
++ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
++ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
++ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
++ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
++ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
++ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
++ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
++ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
++ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
++ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
++ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
++ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
++ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
++ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
++ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
++};
++
++/* These are for a620 and a650 */
++static const u32 a650_protect[] = {
++ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
++ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
++ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
++ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
++ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
++ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
++ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
++ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
++ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
++ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
++ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
++ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
++ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
++ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
++ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
++ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
++ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
++ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
++ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
++ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
++ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
++ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
++ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
++ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
++ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
++ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
++ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
++ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
++ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
++ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
++ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
++ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
++ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
++ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
++};
++
++static void a6xx_set_cp_protect(struct msm_gpu *gpu)
++{
++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++ const u32 *regs = a6xx_protect;
++ unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
++
++ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
++ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
++
++ if (adreno_is_a650(adreno_gpu)) {
++ regs = a650_protect;
++ count = ARRAY_SIZE(a650_protect);
++ count_max = 48;
++ }
++
++ /*
++ * Enable access protection to privileged registers, fault on an access
++ * protect violation and select the last span to protect from the start
++ * address all the way to the end of the register address space
++ */
++ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
++
++ for (i = 0; i < count - 1; i++)
++ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
++ /* last CP_PROTECT to have "infinite" length on the last entry */
++ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
++}
++
+ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+ {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+@@ -722,41 +829,7 @@ static int a6xx_hw_init(struct msm_gpu *
+ }
+
+ /* Protect registers from the CP */
+- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+-
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+- A6XX_PROTECT_RDONLY(0x600, 0x51));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+- A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+- A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+- A6XX_PROTECT_RDONLY(0x501, 0xa));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+- A6XX_PROTECT_RDONLY(0x511, 0x44));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+- A6XX_PROTECT_RW(0xbe20, 0x11f3));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+- A6XX_PROTECT_RDONLY(0x980, 0x4));
+- gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
++ a6xx_set_cp_protect(gpu);
+
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+@@ -37,7 +37,7 @@ struct a6xx_gpu {
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ */
+-#define A6XX_PROTECT_RW(_reg, _len) \
++#define A6XX_PROTECT_NORDWR(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
--- /dev/null
+From dbec64b11c65d74f31427e2b9d5746fbf17bf840 Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Date: Tue, 25 May 2021 17:55:39 +0100
+Subject: gpio: wcd934x: Fix shift-out-of-bounds error
+
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+commit dbec64b11c65d74f31427e2b9d5746fbf17bf840 upstream.
+
+bit-mask for pins 0 to 4 is BIT(0) to BIT(4) however we ended up with BIT(n - 1)
+which is not right, and this was caught by below usban check
+
+UBSAN: shift-out-of-bounds in drivers/gpio/gpio-wcd934x.c:34:14
+
+Fixes: 59c324683400 ("gpio: wcd934x: Add support to wcd934x gpio controller")
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-wcd934x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-wcd934x.c
++++ b/drivers/gpio/gpio-wcd934x.c
+@@ -7,7 +7,7 @@
+ #include <linux/slab.h>
+ #include <linux/of_device.h>
+
+-#define WCD_PIN_MASK(p) BIT(p - 1)
++#define WCD_PIN_MASK(p) BIT(p)
+ #define WCD_REG_DIR_CTL_OFFSET 0x42
+ #define WCD_REG_VAL_CTL_OFFSET 0x43
+ #define WCD934X_NPINS 5
--- /dev/null
+From 2ba0aa2feebda680ecfc3c552e867cf4d1b05a3a Mon Sep 17 00:00:00 2001
+From: Alaa Hleihel <alaa@nvidia.com>
+Date: Thu, 10 Jun 2021 10:34:27 +0300
+Subject: IB/mlx5: Fix initializing CQ fragments buffer
+
+From: Alaa Hleihel <alaa@nvidia.com>
+
+commit 2ba0aa2feebda680ecfc3c552e867cf4d1b05a3a upstream.
+
+The function init_cq_frag_buf() can be called to initialize the current CQ
+fragments buffer cq->buf, or the temporary cq->resize_buf that is filled
+during CQ resize operation.
+
+However, the offending commit started to use function get_cqe() for
+getting the CQEs, the issue with this change is that get_cqe() always
+returns CQEs from cq->buf, which leads us to initialize the wrong buffer,
+and in case of enlarging the CQ we try to access elements beyond the size
+of the current cq->buf and eventually hit a kernel panic.
+
+ [exception RIP: init_cq_frag_buf+103]
+ [ffff9f799ddcbcd8] mlx5_ib_resize_cq at ffffffffc0835d60 [mlx5_ib]
+ [ffff9f799ddcbdb0] ib_resize_cq at ffffffffc05270df [ib_core]
+ [ffff9f799ddcbdc0] llt_rdma_setup_qp at ffffffffc0a6a712 [llt]
+ [ffff9f799ddcbe10] llt_rdma_cc_event_action at ffffffffc0a6b411 [llt]
+ [ffff9f799ddcbe98] llt_rdma_client_conn_thread at ffffffffc0a6bb75 [llt]
+ [ffff9f799ddcbec8] kthread at ffffffffa66c5da1
+ [ffff9f799ddcbf50] ret_from_fork_nospec_begin at ffffffffa6d95ddd
+
+Fix it by getting the needed CQE by calling mlx5_frag_buf_get_wqe() that
+takes the correct source buffer as a parameter.
+
+Fixes: 388ca8be0037 ("IB/mlx5: Implement fragmented completion queue (CQ)")
+Link: https://lore.kernel.org/r/90a0e8c924093cfa50a482880ad7e7edb73dc19a.1623309971.git.leonro@nvidia.com
+Signed-off-by: Alaa Hleihel <alaa@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx5/cq.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -838,15 +838,14 @@ static void destroy_cq_user(struct mlx5_
+ ib_umem_release(cq->buf.umem);
+ }
+
+-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+- struct mlx5_ib_cq_buf *buf)
++static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
+ {
+ int i;
+ void *cqe;
+ struct mlx5_cqe64 *cqe64;
+
+ for (i = 0; i < buf->nent; i++) {
+- cqe = get_cqe(cq, i);
++ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+@@ -872,7 +871,7 @@ static int create_cq_kernel(struct mlx5_
+ if (err)
+ goto err_db;
+
+- init_cq_frag_buf(cq, &cq->buf);
++ init_cq_frag_buf(&cq->buf);
+
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+@@ -1177,7 +1176,7 @@ static int resize_kernel(struct mlx5_ib_
+ if (err)
+ goto ex;
+
+- init_cq_frag_buf(cq, cq->resize_buf);
++ init_cq_frag_buf(cq->resize_buf);
+
+ return 0;
+
--- /dev/null
+From f31500b0d437a2464ca5972d8f5439e156b74960 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 7 Jun 2021 10:57:48 -0700
+Subject: KVM: x86: Ensure liveliness of nested VM-Enter fail tracepoint message
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit f31500b0d437a2464ca5972d8f5439e156b74960 upstream.
+
+Use the __string() machinery provided by the tracing subystem to make a
+copy of the string literals consumed by the "nested VM-Enter failed"
+tracepoint. A complete copy is necessary to ensure that the tracepoint
+can't outlive the data/memory it consumes and deference stale memory.
+
+Because the tracepoint itself is defined by kvm, if kvm-intel and/or
+kvm-amd are built as modules, the memory holding the string literals
+defined by the vendor modules will be freed when the module is unloaded,
+whereas the tracepoint and its data in the ring buffer will live until
+kvm is unloaded (or "indefinitely" if kvm is built-in).
+
+This bug has existed since the tracepoint was added, but was recently
+exposed by a new check in tracing to detect exactly this type of bug.
+
+ fmt: '%s%s
+ ' current_buffer: ' vmx_dirty_log_t-140127 [003] .... kvm_nested_vmenter_failed: '
+ WARNING: CPU: 3 PID: 140134 at kernel/trace/trace.c:3759 trace_check_vprintf+0x3be/0x3e0
+ CPU: 3 PID: 140134 Comm: less Not tainted 5.13.0-rc1-ce2e73ce600a-req #184
+ Hardware name: ASUS Q87M-E/Q87M-E, BIOS 1102 03/03/2014
+ RIP: 0010:trace_check_vprintf+0x3be/0x3e0
+ Code: <0f> 0b 44 8b 4c 24 1c e9 a9 fe ff ff c6 44 02 ff 00 49 8b 97 b0 20
+ RSP: 0018:ffffa895cc37bcb0 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: ffffa895cc37bd08 RCX: 0000000000000027
+ RDX: 0000000000000027 RSI: 00000000ffffdfff RDI: ffff9766cfad74f8
+ RBP: ffffffffc0a041d4 R08: ffff9766cfad74f0 R09: ffffa895cc37bad8
+ R10: 0000000000000001 R11: 0000000000000001 R12: ffffffffc0a041d4
+ R13: ffffffffc0f4dba8 R14: 0000000000000000 R15: ffff976409f2c000
+ FS: 00007f92fa200740(0000) GS:ffff9766cfac0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000559bd11b0000 CR3: 000000019fbaa002 CR4: 00000000001726e0
+ Call Trace:
+ trace_event_printf+0x5e/0x80
+ trace_raw_output_kvm_nested_vmenter_failed+0x3a/0x60 [kvm]
+ print_trace_line+0x1dd/0x4e0
+ s_show+0x45/0x150
+ seq_read_iter+0x2d5/0x4c0
+ seq_read+0x106/0x150
+ vfs_read+0x98/0x180
+ ksys_read+0x5f/0xe0
+ do_syscall_64+0x40/0xb0
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Fixes: 380e0055bc7e ("KVM: nVMX: trace nested VM-Enter failures detected by H/W")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Message-Id: <20210607175748.674002-1-seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/trace.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -1514,16 +1514,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
+ TP_ARGS(msg, err),
+
+ TP_STRUCT__entry(
+- __field(const char *, msg)
++ __string(msg, msg)
+ __field(u32, err)
+ ),
+
+ TP_fast_assign(
+- __entry->msg = msg;
++ __assign_str(msg, msg);
+ __entry->err = err;
+ ),
+
+- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
++ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
+ __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
+ );
+
--- /dev/null
+From 6c605f8371159432ec61cbb1488dcf7ad24ad19a Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Thu, 27 May 2021 12:47:11 +0200
+Subject: perf: Fix data race between pin_count increment/decrement
+
+From: Marco Elver <elver@google.com>
+
+commit 6c605f8371159432ec61cbb1488dcf7ad24ad19a upstream.
+
+KCSAN reports a data race between increment and decrement of pin_count:
+
+ write to 0xffff888237c2d4e0 of 4 bytes by task 15740 on cpu 1:
+ find_get_context kernel/events/core.c:4617
+ __do_sys_perf_event_open kernel/events/core.c:12097 [inline]
+ __se_sys_perf_event_open kernel/events/core.c:11933
+ ...
+ read to 0xffff888237c2d4e0 of 4 bytes by task 15743 on cpu 0:
+ perf_unpin_context kernel/events/core.c:1525 [inline]
+ __do_sys_perf_event_open kernel/events/core.c:12328 [inline]
+ __se_sys_perf_event_open kernel/events/core.c:11933
+ ...
+
+Because neither read-modify-write here is atomic, this can lead to one
+of the operations being lost, resulting in an inconsistent pin_count.
+Fix it by adding the missing locking in the CPU-event case.
+
+Fixes: fe4b04fa31a6 ("perf: Cure task_oncpu_function_call() races")
+Reported-by: syzbot+142c9018f5962db69c7e@syzkaller.appspotmail.com
+Signed-off-by: Marco Elver <elver@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20210527104711.2671610-1-elver@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4547,7 +4547,9 @@ find_get_context(struct pmu *pmu, struct
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ ctx = &cpuctx->ctx;
+ get_ctx(ctx);
++ raw_spin_lock_irqsave(&ctx->lock, flags);
+ ++ctx->pin_count;
++ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return ctx;
+ }
--- /dev/null
+From 6411e386db0a477217607015e7d2910d02f75426 Mon Sep 17 00:00:00 2001
+From: Wang Wensheng <wangwensheng4@huawei.com>
+Date: Mon, 17 May 2021 01:57:49 +0000
+Subject: phy: cadence: Sierra: Fix error return code in cdns_sierra_phy_probe()
+
+From: Wang Wensheng <wangwensheng4@huawei.com>
+
+commit 6411e386db0a477217607015e7d2910d02f75426 upstream.
+
+Fix to return a negative error code from the error handling
+case instead of 0, as done elsewhere in this function.
+
+Fixes: a43f72ae136a ("phy: cadence: Sierra: Change MAX_LANES of Sierra to 16")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
+Link: https://lore.kernel.org/r/20210517015749.127799-1-wangwensheng4@huawei.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/cadence/phy-cadence-sierra.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/phy/cadence/phy-cadence-sierra.c
++++ b/drivers/phy/cadence/phy-cadence-sierra.c
+@@ -614,6 +614,7 @@ static int cdns_sierra_phy_probe(struct
+ sp->nsubnodes = node;
+
+ if (sp->num_lanes > SIERRA_MAX_LANES) {
++ ret = -EINVAL;
+ dev_err(dev, "Invalid lane configuration\n");
+ goto put_child2;
+ }
--- /dev/null
+From b8203ec7f58ae925e10fadd3d136073ae7503a6e Mon Sep 17 00:00:00 2001
+From: Yang Li <yang.lee@linux.alibaba.com>
+Date: Tue, 25 May 2021 18:50:32 +0800
+Subject: phy: ti: Fix an error code in wiz_probe()
+
+From: Yang Li <yang.lee@linux.alibaba.com>
+
+commit b8203ec7f58ae925e10fadd3d136073ae7503a6e upstream.
+
+When the code execute this if statement, the value of ret is 0.
+However, we can see from the dev_err() log that the value of
+ret should be -EINVAL.
+
+Clean up smatch warning:
+
+drivers/phy/ti/phy-j721e-wiz.c:1216 wiz_probe() warn: missing error code 'ret'
+
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Fixes: c9f9eba06629 ("phy: ti: j721e-wiz: Manage typec-gpio-dir")
+Signed-off-by: Yang Li <yang.lee@linux.alibaba.com>
+Link: https://lore.kernel.org/r/1621939832-65535-1-git-send-email-yang.lee@linux.alibaba.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/ti/phy-j721e-wiz.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -894,6 +894,7 @@ static int wiz_probe(struct platform_dev
+
+ if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
+ wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
++ ret = -EINVAL;
+ dev_err(dev, "Invalid typec-dir-debounce property\n");
+ goto err_addr_to_resource;
+ }
--- /dev/null
+From 7c2fc79250cafa1a29befeb60163028ec4720814 Mon Sep 17 00:00:00 2001
+From: Chen Li <chenli@uniontech.com>
+Date: Tue, 27 Apr 2021 15:17:45 +0800
+Subject: phy: usb: Fix misuse of IS_ENABLED
+
+From: Chen Li <chenli@uniontech.com>
+
+commit 7c2fc79250cafa1a29befeb60163028ec4720814 upstream.
+
+While IS_ENABLED() is perfectly fine for CONFIG_* symbols, it is not
+for other symbols such as __BIG_ENDIAN that is provided directly by
+the compiler.
+
+Switch to use CONFIG_CPU_BIG_ENDIAN instead of __BIG_ENDIAN.
+
+Signed-off-by: Chen Li <chenli@uniontech.com>
+Reviewed-by: Al Cooper <alcooperx@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Fixes: 94583a41047e ("phy: usb: Restructure in preparation for adding 7216 USB support")
+Link: https://lore.kernel.org/r/87czuggpra.wl-chenli@uniontech.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/broadcom/phy-brcm-usb-init.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
++++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
+@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
++ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __
+ static inline void brcm_usb_writel(u32 val, void __iomem *addr)
+ {
+ /* See brcmnand_readl() comments */
+- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
++ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
--- /dev/null
+From a3e74fb9247cd530dca246699d5eb5a691884d32 Mon Sep 17 00:00:00 2001
+From: Kamal Heib <kamalheib1@gmail.com>
+Date: Tue, 25 May 2021 18:01:34 +0300
+Subject: RDMA/ipoib: Fix warning caused by destroying non-initial netns
+
+From: Kamal Heib <kamalheib1@gmail.com>
+
+commit a3e74fb9247cd530dca246699d5eb5a691884d32 upstream.
+
+After the commit 5ce2dced8e95 ("RDMA/ipoib: Set rtnl_link_ops for ipoib
+interfaces"), if the IPoIB device is moved to non-initial netns,
+destroying that netns lets the device vanish instead of moving it back to
+the initial netns, This is happening because default_device_exit() skips
+the interfaces due to having rtnl_link_ops set.
+
+Steps to reproduce:
+ ip netns add foo
+ ip link set mlx5_ib0 netns foo
+ ip netns delete foo
+
+WARNING: CPU: 1 PID: 704 at net/core/dev.c:11435 netdev_exit+0x3f/0x50
+Modules linked in: xt_CHECKSUM xt_MASQUERADE xt_conntrack ipt_REJECT
+nf_reject_ipv4 nft_compat nft_counter nft_chain_nat nf_nat nf_conntrack
+nf_defrag_ipv6 nf_defrag_ipv4 nf_tables nfnetlink tun d
+ fuse
+CPU: 1 PID: 704 Comm: kworker/u64:3 Tainted: G S W 5.13.0-rc1+ #1
+Hardware name: Dell Inc. PowerEdge R630/02C2CP, BIOS 2.1.5 04/11/2016
+Workqueue: netns cleanup_net
+RIP: 0010:netdev_exit+0x3f/0x50
+Code: 48 8b bb 30 01 00 00 e8 ef 81 b1 ff 48 81 fb c0 3a 54 a1 74 13 48
+8b 83 90 00 00 00 48 81 c3 90 00 00 00 48 39 d8 75 02 5b c3 <0f> 0b 5b
+c3 66 66 2e 0f 1f 84 00 00 00 00 00 66 90 0f 1f 44 00
+RSP: 0018:ffffb297079d7e08 EFLAGS: 00010206
+RAX: ffff8eb542c00040 RBX: ffff8eb541333150 RCX: 000000008010000d
+RDX: 000000008010000e RSI: 000000008010000d RDI: ffff8eb440042c00
+RBP: ffffb297079d7e48 R08: 0000000000000001 R09: ffffffff9fdeac00
+R10: ffff8eb5003be000 R11: 0000000000000001 R12: ffffffffa1545620
+R13: ffffffffa1545628 R14: 0000000000000000 R15: ffffffffa1543b20
+FS: 0000000000000000(0000) GS:ffff8ed37fa00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00005601b5f4c2e8 CR3: 0000001fc8c10002 CR4: 00000000003706e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ ops_exit_list.isra.9+0x36/0x70
+ cleanup_net+0x234/0x390
+ process_one_work+0x1cb/0x360
+ ? process_one_work+0x360/0x360
+ worker_thread+0x30/0x370
+ ? process_one_work+0x360/0x360
+ kthread+0x116/0x130
+ ? kthread_park+0x80/0x80
+ ret_from_fork+0x22/0x30
+
+To avoid the above warning and later on the kernel panic that could happen
+on shutdown due to a NULL pointer dereference, make sure to set the
+netns_refund flag that was introduced by commit 3a5ca857079e ("can: dev:
+Move device back to init netns on owning netns delete") to properly
+restore the IPoIB interfaces to the initial netns.
+
+Fixes: 5ce2dced8e95 ("RDMA/ipoib: Set rtnl_link_ops for ipoib interfaces")
+Link: https://lore.kernel.org/r/20210525150134.139342-1-kamalheib1@gmail.com
+Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struc
+
+ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .kind = "ipoib",
++ .netns_refund = true,
+ .maxtype = IFLA_IPOIB_MAX,
+ .policy = ipoib_policy,
+ .priv_size = sizeof(struct ipoib_dev_priv),
--- /dev/null
+From 404e5a12691fe797486475fe28cc0b80cb8bef2c Mon Sep 17 00:00:00 2001
+From: Shay Drory <shayd@nvidia.com>
+Date: Thu, 3 Jun 2021 16:19:39 +0300
+Subject: RDMA/mlx4: Do not map the core_clock page to user space unless enabled
+
+From: Shay Drory <shayd@nvidia.com>
+
+commit 404e5a12691fe797486475fe28cc0b80cb8bef2c upstream.
+
+Currently when mlx4 maps the hca_core_clock page to the user space there
+are read-modifiable registers, one of which is semaphore, on this page as
+well as the clock counter. If user reads the wrong offset, it can modify
+the semaphore and hang the device.
+
+Do not map the hca_core_clock page to the user space unless the device has
+been put in a backwards compatibility mode to support this feature.
+
+After this patch, mlx4 core_clock won't be mapped to user space on the
+majority of existing devices and the uverbs device time feature in
+ibv_query_rt_values_ex() will be disabled.
+
+Fixes: 52033cfb5aab ("IB/mlx4: Add mmap call to map the hardware clock")
+Link: https://lore.kernel.org/r/9632304e0d6790af84b3b706d8c18732bc0d5e27.1622726305.git.leonro@nvidia.com
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/mlx4/main.c | 5 +----
+ drivers/net/ethernet/mellanox/mlx4/fw.c | 3 +++
+ drivers/net/ethernet/mellanox/mlx4/fw.h | 1 +
+ drivers/net/ethernet/mellanox/mlx4/main.c | 6 ++++++
+ include/linux/mlx4/device.h | 1 +
+ 5 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -580,12 +580,9 @@ static int mlx4_ib_query_device(struct i
+ props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+
+- if (!mlx4_is_slave(dev->dev))
+- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+-
+ if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
+- if (!err && !mlx4_is_slave(dev->dev)) {
++ if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
+ resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ }
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
+ #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
+ #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
++#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
+ #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
+@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
+
+ if (mlx4_is_mfunc(dev))
+ disable_unsupported_roce_caps(outbox);
++ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
++ dev_cap->map_clock_to_user = field & 0x80;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+ dev_cap->reserved_qps = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
+@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
+ u32 health_buffer_addrs;
+ struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+ bool wol_port[MLX4_MAX_PORTS + 1];
++ bool map_clock_to_user;
+ };
+
+ struct mlx4_func_cap {
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev
+ }
+ }
+
++ dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
+ dev->caps.uar_page_size = PAGE_SIZE;
+ dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
+ dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struc
+ if (mlx4_is_slave(dev))
+ return -EOPNOTSUPP;
+
++ if (!dev->caps.map_clock_to_user) {
++ mlx4_dbg(dev, "Map clock to user is not supported.\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (!params)
+ return -EINVAL;
+
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -631,6 +631,7 @@ struct mlx4_caps {
+ bool wol_port[MLX4_MAX_PORTS + 1];
+ struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
++ bool map_clock_to_user;
+ };
+
+ struct mlx4_buf_list {
--- /dev/null
+From 4c668630bf8ea90a041fc69c9984486e0f56682d Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Sun, 23 May 2021 15:10:45 +0800
+Subject: regulator: bd71828: Fix .n_voltages settings
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit 4c668630bf8ea90a041fc69c9984486e0f56682d upstream.
+
+Current .n_voltages settings do not cover the latest 2 valid selectors,
+so it fails to set voltage for the highest voltage support.
+The latest linear range has step_uV = 0, so it does not matter if we
+count the .n_voltages to maximum selector + 1 or the first selector of
+latest linear range + 1.
+To simplify calculating the n_voltages, let's just set the
+.n_voltages to maximum selector + 1.
+
+Fixes: 522498f8cb8c ("regulator: bd71828: Basic support for ROHM bd71828 PMIC regulators")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Reviewed-by: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+Link: https://lore.kernel.org/r/20210523071045.2168904-2-axel.lin@ingics.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mfd/rohm-bd71828.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/include/linux/mfd/rohm-bd71828.h
++++ b/include/linux/mfd/rohm-bd71828.h
+@@ -26,11 +26,11 @@ enum {
+ BD71828_REGULATOR_AMOUNT,
+ };
+
+-#define BD71828_BUCK1267_VOLTS 0xEF
+-#define BD71828_BUCK3_VOLTS 0x10
+-#define BD71828_BUCK4_VOLTS 0x20
+-#define BD71828_BUCK5_VOLTS 0x10
+-#define BD71828_LDO_VOLTS 0x32
++#define BD71828_BUCK1267_VOLTS 0x100
++#define BD71828_BUCK3_VOLTS 0x20
++#define BD71828_BUCK4_VOLTS 0x40
++#define BD71828_BUCK5_VOLTS 0x20
++#define BD71828_LDO_VOLTS 0x40
+ /* LDO6 is fixed 1.8V voltage */
+ #define BD71828_LDO_6_VOLTAGE 1800000
+
--- /dev/null
+From bc537e65b09a05923f98a31920d1ab170e648dba Mon Sep 17 00:00:00 2001
+From: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+Date: Wed, 2 Jun 2021 08:45:58 +0300
+Subject: regulator: bd718x7: Fix the BUCK7 voltage setting on BD71837
+
+From: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+commit bc537e65b09a05923f98a31920d1ab170e648dba upstream.
+
+Changing the BD71837 voltages for other regulators except the first 4 BUCKs
+should be forbidden when the regulator is enabled. There may be out-of-spec
+voltage spikes if the voltage of these "non DVS" bucks is changed when
+enabled. This restriction was accidentally removed when the LDO voltage
+change was allowed for BD71847. (It was not noticed that the BD71837
+BUCK7 used same voltage setting function as LDOs).
+
+Additionally this bug causes incorrect voltage monitoring register access.
+The voltage change function accidentally used for bd71837 BUCK7 is
+intended to only handle LDO voltage changes. A BD71847 LDO specific
+voltage monitoring disabling code gets executed on BD71837 and register
+offsets are wrongly calculated as regulator is assumed to be an LDO.
+
+Prevent the BD71837 BUCK7 voltage change when BUCK7 is enabled by using
+the correct voltage setting operation.
+
+Fixes: 9bcbabafa19b ("regulator: bd718x7: remove voltage change restriction from BD71847 LDOs")
+Signed-off-by: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+Link: https://lore.kernel.org/r/bd8c00931421fafa57e3fdf46557a83075b7cc17.1622610103.git.matti.vaittinen@fi.rohmeurope.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/bd718x7-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/bd718x7-regulator.c
++++ b/drivers/regulator/bd718x7-regulator.c
+@@ -364,7 +364,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops,
+ NULL);
+
+ BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+- regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
++ regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ NULL);
+ /*
--- /dev/null
+From 98e48cd9283dbac0e1445ee780889f10b3d1db6a Mon Sep 17 00:00:00 2001
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Date: Thu, 20 May 2021 01:12:23 +0300
+Subject: regulator: core: resolve supply for boot-on/always-on regulators
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+commit 98e48cd9283dbac0e1445ee780889f10b3d1db6a upstream.
+
+For the boot-on/always-on regulators the set_machine_constraints() is
+called before resolving rdev->supply. Thus the code would try to enable
+rdev before enabling supplying regulator. Enforce resolving supply
+regulator before enabling rdev.
+
+Fixes: aea6cb99703e ("regulator: resolve supply after creating regulator")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20210519221224.2868496-1-dmitry.baryshkov@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1422,6 +1422,12 @@ static int set_machine_constraints(struc
+ * and we have control then make sure it is enabled.
+ */
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
++ /* If we want to enable this regulator, make sure that we know
++ * the supplying regulator.
++ */
++ if (rdev->supply_name && !rdev->supply)
++ return -EPROBE_DEFER;
++
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
--- /dev/null
+From 34991ee96fd8477479dd15adadceb6b28b30d9b0 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Mon, 17 May 2021 18:53:24 +0800
+Subject: regulator: fan53880: Fix missing n_voltages setting
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit 34991ee96fd8477479dd15adadceb6b28b30d9b0 upstream.
+
+Fixes: e6dea51e2d41 ("regulator: fan53880: Add initial support")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Acked-by: Christoph Fritz <chf.fritz@googlemail.com>
+Link: https://lore.kernel.org/r/20210517105325.1227393-1-axel.lin@ingics.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/fan53880.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/regulator/fan53880.c
++++ b/drivers/regulator/fan53880.c
+@@ -51,6 +51,7 @@ static const struct regulator_ops fan538
+ REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
+ }, \
+ .n_linear_ranges = 2, \
++ .n_voltages = 0x74, \
+ .vsel_reg = FAN53880_LDO ## _num ## VOUT, \
+ .vsel_mask = 0x7f, \
+ .enable_reg = FAN53880_ENABLE, \
+@@ -76,6 +77,7 @@ static const struct regulator_desc fan53
+ REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
+ },
+ .n_linear_ranges = 2,
++ .n_voltages = 0xf8,
+ .vsel_reg = FAN53880_BUCKVOUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = FAN53880_ENABLE,
+@@ -95,6 +97,7 @@ static const struct regulator_desc fan53
+ REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
+ },
+ .n_linear_ranges = 2,
++ .n_voltages = 0x71,
+ .vsel_reg = FAN53880_BOOSTVOUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = FAN53880_ENABLE_BOOST,
--- /dev/null
+From 6f55c5dd1118b3076d11d9cb17f5c5f4bc3a1162 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <digetx@gmail.com>
+Date: Mon, 24 May 2021 01:42:42 +0300
+Subject: regulator: max77620: Use device_set_of_node_from_dev()
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+commit 6f55c5dd1118b3076d11d9cb17f5c5f4bc3a1162 upstream.
+
+The MAX77620 driver fails to re-probe on deferred probe because driver
+core tries to claim resources that are already claimed by the PINCTRL
+device. Use device_set_of_node_from_dev() helper which marks OF node as
+reused, skipping erroneous execution of pinctrl_bind_pins() for the PMIC
+device on the re-probe.
+
+Fixes: aea6cb99703e ("regulator: resolve supply after creating regulator")
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Link: https://lore.kernel.org/r/20210523224243.13219-2-digetx@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/max77620-regulator.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/regulator/max77620-regulator.c
++++ b/drivers/regulator/max77620-regulator.c
+@@ -814,6 +814,13 @@ static int max77620_regulator_probe(stru
+ config.dev = dev;
+ config.driver_data = pmic;
+
++ /*
++ * Set of_node_reuse flag to prevent driver core from attempting to
++ * claim any pinmux resources already claimed by the parent device.
++ * Otherwise PMIC driver will fail to re-probe.
++ */
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
+ for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ struct regulator_dev *rdev;
+ struct regulator_desc *rdesc;
--- /dev/null
+From 86ab21cc39e6b99b7065ab9008c90bec5dec535a Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Sun, 30 May 2021 20:41:00 +0800
+Subject: regulator: rtmv20: Fix .set_current_limit/.get_current_limit callbacks
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit 86ab21cc39e6b99b7065ab9008c90bec5dec535a upstream.
+
+Current code does not set .curr_table and .n_linear_ranges settings,
+so it cannot use the regulator_get/set_current_limit_regmap helpers.
+If we set up the curr_table, it will have 200 entries.
+Implement customized .set_current_limit/.get_current_limit callbacks
+instead.
+
+Fixes: b8c054a5eaf0 ("regulator: rtmv20: Adds support for Richtek RTMV20 load switch regulator")
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Reviewed-by: ChiYuan Huang <cy_huang@richtek.com>
+Link: https://lore.kernel.org/r/20210530124101.477727-1-axel.lin@ingics.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/regulator/rtmv20-regulator.c | 42 +++++++++++++++++++++++++++++++++--
+ 1 file changed, 40 insertions(+), 2 deletions(-)
+
+--- a/drivers/regulator/rtmv20-regulator.c
++++ b/drivers/regulator/rtmv20-regulator.c
+@@ -103,9 +103,47 @@ static int rtmv20_lsw_disable(struct reg
+ return 0;
+ }
+
++static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
++ int max_uA)
++{
++ int sel;
++
++ if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
++ return -EINVAL;
++
++ if (max_uA > RTMV20_LSW_MAXUA)
++ max_uA = RTMV20_LSW_MAXUA;
++
++ sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
++
++ /* Ensure the selected setting is still in range */
++ if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
++ return -EINVAL;
++
++ sel <<= ffs(rdev->desc->csel_mask) - 1;
++
++ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
++ rdev->desc->csel_mask, sel);
++}
++
++static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
++{
++ unsigned int val;
++ int ret;
++
++ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
++ if (ret)
++ return ret;
++
++ val &= rdev->desc->csel_mask;
++ val >>= ffs(rdev->desc->csel_mask) - 1;
++
++ return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
++}
++
+ static const struct regulator_ops rtmv20_regulator_ops = {
+- .set_current_limit = regulator_set_current_limit_regmap,
+- .get_current_limit = regulator_get_current_limit_regmap,
++ .set_current_limit = rtmv20_lsw_set_current_limit,
++ .get_current_limit = rtmv20_lsw_get_current_limit,
+ .enable = rtmv20_lsw_enable,
+ .disable = rtmv20_lsw_disable,
+ .is_enabled = regulator_is_enabled_regmap,
--- /dev/null
+From 68d7a190682aa4eb02db477328088ebad15acc83 Mon Sep 17 00:00:00 2001
+From: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Date: Wed, 2 Jun 2021 16:58:08 +0200
+Subject: sched/fair: Fix util_est UTIL_AVG_UNCHANGED handling
+
+From: Dietmar Eggemann <dietmar.eggemann@arm.com>
+
+commit 68d7a190682aa4eb02db477328088ebad15acc83 upstream.
+
+The util_est internal UTIL_AVG_UNCHANGED flag which is used to prevent
+unnecessary util_est updates uses the LSB of util_est.enqueued. It is
+exposed via _task_util_est() (and task_util_est()).
+
+Commit 92a801e5d5b7 ("sched/fair: Mask UTIL_AVG_UNCHANGED usages")
+mentions that the LSB is lost for util_est resolution but
+find_energy_efficient_cpu() checks if task_util_est() returns 0 to
+return prev_cpu early.
+
+_task_util_est() returns the max value of util_est.ewma and
+util_est.enqueued or'ed w/ UTIL_AVG_UNCHANGED.
+So task_util_est() returning the max of task_util() and
+_task_util_est() will never return 0 under the default
+SCHED_FEAT(UTIL_EST, true).
+
+To fix this use the MSB of util_est.enqueued instead and keep the flag
+util_est internal, i.e. don't export it via _task_util_est().
+
+The maximal possible util_avg value for a task is 1024 so the MSB of
+'unsigned int util_est.enqueued' isn't used to store a util value.
+
+As a caveat the code behind the util_est_se trace point has to filter
+UTIL_AVG_UNCHANGED to see the real util_est.enqueued value which should
+be easy to do.
+
+This also fixes an issue report by Xuewen Yan that util_est_update()
+only used UTIL_AVG_UNCHANGED for the subtrahend of the equation:
+
+ last_enqueued_diff = ue.enqueued - (task_util() | UTIL_AVG_UNCHANGED)
+
+Fixes: b89997aa88f0b ("sched/pelt: Fix task util_est update filtering")
+Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Reviewed-by: Vincent Donnefort <vincent.donnefort@arm.com>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Link: https://lore.kernel.org/r/20210602145808.1562603-1-dietmar.eggemann@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sched.h | 8 ++++++++
+ kernel/sched/debug.c | 3 ++-
+ kernel/sched/fair.c | 5 +++--
+ kernel/sched/pelt.h | 11 +----------
+ 4 files changed, 14 insertions(+), 13 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -348,11 +348,19 @@ struct load_weight {
+ * Only for tasks we track a moving average of the past instantaneous
+ * estimated utilization. This allows to absorb sporadic drops in utilization
+ * of an otherwise almost periodic task.
++ *
++ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
++ * updates. When a task is dequeued, its util_est should not be updated if its
++ * util_avg has not been updated in the meantime.
++ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
++ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
++ * for a task) it is safe to use MSB.
+ */
+ struct util_est {
+ unsigned int enqueued;
+ unsigned int ewma;
+ #define UTIL_EST_WEIGHT_SHIFT 2
++#define UTIL_AVG_UNCHANGED 0x80000000
+ } __attribute__((__aligned__(sizeof(u64))));
+
+ /*
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -888,6 +888,7 @@ __initcall(init_sched_debug_procfs);
+ #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
+ #define __P(F) __PS(#F, F)
+ #define P(F) __PS(#F, p->F)
++#define PM(F, M) __PS(#F, p->F & (M))
+ #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
+ #define __PN(F) __PSN(#F, F)
+ #define PN(F) __PSN(#F, p->F)
+@@ -1014,7 +1015,7 @@ void proc_sched_show_task(struct task_st
+ P(se.avg.util_avg);
+ P(se.avg.last_update_time);
+ P(se.avg.util_est.ewma);
+- P(se.avg.util_est.enqueued);
++ PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
+ #endif
+ #ifdef CONFIG_UCLAMP_TASK
+ __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3903,7 +3903,7 @@ static inline unsigned long _task_util_e
+ {
+ struct util_est ue = READ_ONCE(p->se.avg.util_est);
+
+- return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
++ return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
+ }
+
+ static inline unsigned long task_util_est(struct task_struct *p)
+@@ -4003,7 +4003,7 @@ static inline void util_est_update(struc
+ * Reset EWMA on utilization increases, the moving average is used only
+ * to smooth utilization decreases.
+ */
+- ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
++ ue.enqueued = task_util(p);
+ if (sched_feat(UTIL_EST_FASTUP)) {
+ if (ue.ewma < ue.enqueued) {
+ ue.ewma = ue.enqueued;
+@@ -4052,6 +4052,7 @@ static inline void util_est_update(struc
+ ue.ewma += last_ewma_diff;
+ ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
+ done:
++ ue.enqueued |= UTIL_AVG_UNCHANGED;
+ WRITE_ONCE(p->se.avg.util_est, ue);
+
+ trace_sched_util_est_se_tp(&p->se);
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struc
+ return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+
+-/*
+- * When a task is dequeued, its estimated utilization should not be update if
+- * its util_avg has not been updated at least once.
+- * This flag is used to synchronize util_avg updates with util_est updates.
+- * We map this information into the LSB bit of the utilization saved at
+- * dequeue time (i.e. util_est.dequeued).
+- */
+-#define UTIL_AVG_UNCHANGED 0x1
+-
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ unsigned int enqueued;
+@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(st
+ if (!sched_feat(UTIL_EST))
+ return;
+
+- /* Avoid store if the flag has been already set */
++ /* Avoid store if the flag has been already reset */
+ enqueued = avg->util_est.enqueued;
+ if (!(enqueued & UTIL_AVG_UNCHANGED))
+ return;
--- /dev/null
+From 7c7ad626d9a0ff0a36c1e2a3cfbbc6a13828d5eb Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Thu, 27 May 2021 14:29:15 +0200
+Subject: sched/fair: Keep load_avg and load_sum synced
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+commit 7c7ad626d9a0ff0a36c1e2a3cfbbc6a13828d5eb upstream.
+
+when removing a cfs_rq from the list we only check _sum value so we must
+ensure that _avg and _sum stay synced so load_sum can't be null whereas
+load_avg is not after propagating load in the cgroup hierarchy.
+
+Use load_avg to compute load_sum similarly to what is done for util_sum
+and runnable_sum.
+
+Fixes: 0e2d2aaaae52 ("sched/fair: Rewrite PELT migration propagation")
+Reported-by: Odin Ugedal <odin@uged.al>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Odin Ugedal <odin@uged.al>
+Link: https://lkml.kernel.org/r/20210527122916.27683-2-vincent.guittot@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3501,10 +3501,9 @@ update_tg_cfs_runnable(struct cfs_rq *cf
+ static inline void
+ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
+ {
+- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
++ long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ unsigned long load_avg;
+ u64 load_sum = 0;
+- s64 delta_sum;
+ u32 divider;
+
+ if (!runnable_sum)
+@@ -3551,13 +3550,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq
+ load_sum = (s64)se_weight(se) * runnable_sum;
+ load_avg = div_s64(load_sum, divider);
+
+- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
+- delta_avg = load_avg - se->avg.load_avg;
++ delta = load_avg - se->avg.load_avg;
+
+ se->avg.load_sum = runnable_sum;
+ se->avg.load_avg = load_avg;
+- add_positive(&cfs_rq->avg.load_avg, delta_avg);
+- add_positive(&cfs_rq->avg.load_sum, delta_sum);
++
++ add_positive(&cfs_rq->avg.load_avg, delta);
++ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+ }
+
+ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
--- /dev/null
+From 02da26ad5ed6ea8680e5d01f20661439611ed776 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Thu, 27 May 2021 14:29:16 +0200
+Subject: sched/fair: Make sure to update tg contrib for blocked load
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+commit 02da26ad5ed6ea8680e5d01f20661439611ed776 upstream.
+
+During the update of fair blocked load (__update_blocked_fair()), we
+update the contribution of the cfs in tg->load_avg if cfs_rq's pelt
+has decayed. Nevertheless, the pelt values of a cfs_rq could have
+been recently updated while propagating the change of a child. In this
+case, cfs_rq's pelt will not decayed because it has already been
+updated and we don't update tg->load_avg.
+
+__update_blocked_fair
+ ...
+ for_each_leaf_cfs_rq_safe: child cfs_rq
+ update cfs_rq_load_avg() for child cfs_rq
+ ...
+ update_load_avg(cfs_rq_of(se), se, 0)
+ ...
+ update cfs_rq_load_avg() for parent cfs_rq
+ -propagation of child's load makes parent cfs_rq->load_sum
+ becoming null
+ -UPDATE_TG is not set so it doesn't update parent
+ cfs_rq->tg_load_avg_contrib
+ ..
+ for_each_leaf_cfs_rq_safe: parent cfs_rq
+ update cfs_rq_load_avg() for parent cfs_rq
+ - nothing to do because parent cfs_rq has already been updated
+ recently so cfs_rq->tg_load_avg_contrib is not updated
+ ...
+ parent cfs_rq is decayed
+ list_del_leaf_cfs_rq parent cfs_rq
+ - but it still contributes to tg->load_avg
+
+we must set UPDATE_TG flags when propagating pending load to the parent
+
+Fixes: 039ae8bcf7a5 ("sched/fair: Fix O(nr_cgroups) in the load balancing path")
+Reported-by: Odin Ugedal <odin@uged.al>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Odin Ugedal <odin@uged.al>
+Link: https://lkml.kernel.org/r/20210527122916.27683-3-vincent.guittot@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7960,7 +7960,7 @@ static bool __update_blocked_fair(struct
+ /* Propagate pending load changes to the parent, if any: */
+ se = cfs_rq->tg->se[cpu];
+ if (se && !skip_blocked_update(se))
+- update_load_avg(cfs_rq_of(se), se, 0);
++ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
+
+ /*
+ * There can be a lot of idle CPU cgroups. Don't let fully
usb-fix-various-gadget-panics-on-10gbps-cabling.patch
usb-typec-tcpm-cancel-vdm-and-state-machine-hrtimer-when-unregister-tcpm-port.patch
usb-typec-tcpm-cancel-frs-hrtimer-when-unregister-tcpm-port.patch
+regulator-core-resolve-supply-for-boot-on-always-on-regulators.patch
+regulator-max77620-use-device_set_of_node_from_dev.patch
+regulator-bd718x7-fix-the-buck7-voltage-setting-on-bd71837.patch
+regulator-fan53880-fix-missing-n_voltages-setting.patch
+regulator-bd71828-fix-.n_voltages-settings.patch
+regulator-rtmv20-fix-.set_current_limit-.get_current_limit-callbacks.patch
+phy-usb-fix-misuse-of-is_enabled.patch
+usb-dwc3-gadget-disable-gadget-irq-during-pullup-disable.patch
+usb-typec-mux-fix-copy-paste-mistake-in-typec_mux_match.patch
+drm-mcde-fix-off-by-10-3-in-calculation.patch
+drm-msm-a6xx-fix-incorrectly-set-uavflagprd_inv-field-for-a650.patch
+drm-msm-a6xx-update-fix-cp_protect-initialization.patch
+drm-msm-a6xx-avoid-shadow-null-reference-in-failure-path.patch
+rdma-ipoib-fix-warning-caused-by-destroying-non-initial-netns.patch
+rdma-mlx4-do-not-map-the-core_clock-page-to-user-space-unless-enabled.patch
+arm-cpuidle-avoid-orphan-section-warning.patch
+vmlinux.lds.h-avoid-orphan-section-with-smp.patch
+tools-bootconfig-fix-error-return-code-in-apply_xbc.patch
+phy-cadence-sierra-fix-error-return-code-in-cdns_sierra_phy_probe.patch
+asoc-core-fix-null-point-dereference-in-fmt_single_name.patch
+asoc-meson-gx-card-fix-sound-dai-dt-schema.patch
+phy-ti-fix-an-error-code-in-wiz_probe.patch
+gpio-wcd934x-fix-shift-out-of-bounds-error.patch
+perf-fix-data-race-between-pin_count-increment-decrement.patch
+sched-fair-keep-load_avg-and-load_sum-synced.patch
+sched-fair-make-sure-to-update-tg-contrib-for-blocked-load.patch
+sched-fair-fix-util_est-util_avg_unchanged-handling.patch
+x86-nmi_watchdog-fix-old-style-nmi-watchdog-regression-on-old-intel-cpus.patch
+kvm-x86-ensure-liveliness-of-nested-vm-enter-fail-tracepoint-message.patch
+ib-mlx5-fix-initializing-cq-fragments-buffer.patch
--- /dev/null
+From e8ba0b2b64126381643bb50df3556b139a60545a Mon Sep 17 00:00:00 2001
+From: Zhen Lei <thunder.leizhen@huawei.com>
+Date: Sat, 8 May 2021 11:42:16 +0800
+Subject: tools/bootconfig: Fix error return code in apply_xbc()
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+commit e8ba0b2b64126381643bb50df3556b139a60545a upstream.
+
+Fix to return a negative error code from the error handling case instead
+of 0, as done elsewhere in this function.
+
+Link: https://lkml.kernel.org/r/20210508034216.2277-1-thunder.leizhen@huawei.com
+
+Fixes: a995e6bc0524 ("tools/bootconfig: Fix to check the write failure correctly")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/bootconfig/main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, c
+ }
+ /* TODO: Ensure the @path is initramfs/initrd image */
+ if (fstat(fd, &stat) < 0) {
++ ret = -errno;
+ pr_err("Failed to get the size of %s\n", path);
+ goto out;
+ }
--- /dev/null
+From 8212937305f84ef73ea81036dafb80c557583d4b Mon Sep 17 00:00:00 2001
+From: Wesley Cheng <wcheng@codeaurora.org>
+Date: Thu, 20 May 2021 21:23:57 -0700
+Subject: usb: dwc3: gadget: Disable gadget IRQ during pullup disable
+
+From: Wesley Cheng <wcheng@codeaurora.org>
+
+commit 8212937305f84ef73ea81036dafb80c557583d4b upstream.
+
+Current sequence utilizes dwc3_gadget_disable_irq() alongside
+synchronize_irq() to ensure that no further DWC3 events are generated.
+However, the dwc3_gadget_disable_irq() API only disables device
+specific events. Endpoint events can still be generated. Briefly
+disable the interrupt line, so that the cleanup code can run to
+prevent device and endpoint events. (i.e. __dwc3_gadget_stop() and
+dwc3_stop_active_transfers() respectively)
+
+Without doing so, it can lead to both the interrupt handler and the
+pullup disable routine both writing to the GEVNTCOUNT register, which
+will cause an incorrect count being read from future interrupts.
+
+Fixes: ae7e86108b12 ("usb: dwc3: Stop active transfers before halting the controller")
+Signed-off-by: Wesley Cheng <wcheng@codeaurora.org>
+Link: https://lore.kernel.org/r/1621571037-1424-1-git-send-email-wcheng@codeaurora.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2143,13 +2143,10 @@ static int dwc3_gadget_pullup(struct usb
+ }
+
+ /*
+- * Synchronize any pending event handling before executing the controller
+- * halt routine.
++ * Synchronize and disable any further event handling while controller
++ * is being enabled/disabled.
+ */
+- if (!is_on) {
+- dwc3_gadget_disable_irq(dwc);
+- synchronize_irq(dwc->irq_gadget);
+- }
++ disable_irq(dwc->irq_gadget);
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+@@ -2187,6 +2184,8 @@ static int dwc3_gadget_pullup(struct usb
+
+ ret = dwc3_gadget_run_stop(dwc, is_on, false);
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ enable_irq(dwc->irq_gadget);
++
+ pm_runtime_put(dwc->dev);
+
+ return ret;
--- /dev/null
+From 142d0b24c1b17139f1aaaacae7542a38aa85640f Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Wed, 9 Jun 2021 17:21:32 -0700
+Subject: usb: typec: mux: Fix copy-paste mistake in typec_mux_match
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 142d0b24c1b17139f1aaaacae7542a38aa85640f upstream.
+
+Fix the copy-paste mistake in the return path of typec_mux_match(),
+where dev is considered a member of struct typec_switch rather than
+struct typec_mux.
+
+The two structs are identical in regards to having the struct device as
+the first entry, so this provides no functional change.
+
+Fixes: 3370db35193b ("usb: typec: Registering real device entries for the muxes")
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20210610002132.3088083-1-bjorn.andersson@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/mux.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/typec/mux.c
++++ b/drivers/usb/typec/mux.c
+@@ -239,7 +239,7 @@ find_mux:
+ dev = class_find_device(&typec_mux_class, NULL, fwnode,
+ mux_fwnode_match);
+
+- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
++ return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
+ }
+
+ /**
--- /dev/null
+From d4c6399900364facd84c9e35ce1540b6046c345f Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Wed, 5 May 2021 17:14:11 -0700
+Subject: vmlinux.lds.h: Avoid orphan section with !SMP
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit d4c6399900364facd84c9e35ce1540b6046c345f upstream.
+
+With x86_64_defconfig and the following configs, there is an orphan
+section warning:
+
+CONFIG_SMP=n
+CONFIG_AMD_MEM_ENCRYPT=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_KVM=y
+CONFIG_PARAVIRT=y
+
+ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/cpu/vmware.o' being placed in section `.data..decrypted'
+ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/kvm.o' being placed in section `.data..decrypted'
+
+These sections are created with DEFINE_PER_CPU_DECRYPTED, which
+ultimately turns into __PCPU_ATTRS, which in turn has a section
+attribute with a value of PER_CPU_BASE_SECTION + the section name. When
+CONFIG_SMP is not set, the base section is .data and that is not
+currently handled in any linker script.
+
+Add .data..decrypted to PERCPU_DECRYPTED_SECTION, which is included in
+PERCPU_INPUT -> PERCPU_SECTION, which is include in the x86 linker
+script when either CONFIG_X86_64 or CONFIG_SMP is unset, taking care of
+the warning.
+
+Fixes: ac26963a1175 ("percpu: Introduce DEFINE_PER_CPU_DECRYPTED")
+Link: https://github.com/ClangBuiltLinux/linux/issues/1360
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com> # build
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20210506001410.1026691-1-nathan@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/vmlinux.lds.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -971,6 +971,7 @@
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ #define PERCPU_DECRYPTED_SECTION \
+ . = ALIGN(PAGE_SIZE); \
++ *(.data..decrypted) \
+ *(.data..percpu..decrypted) \
+ . = ALIGN(PAGE_SIZE);
+ #else
--- /dev/null
+From a8383dfb2138742a1bb77b481ada047aededa2ba Mon Sep 17 00:00:00 2001
+From: CodyYao-oc <CodyYao-oc@zhaoxin.com>
+Date: Mon, 7 Jun 2021 10:53:35 +0800
+Subject: x86/nmi_watchdog: Fix old-style NMI watchdog regression on old Intel CPUs
+
+From: CodyYao-oc <CodyYao-oc@zhaoxin.com>
+
+commit a8383dfb2138742a1bb77b481ada047aededa2ba upstream.
+
+The following commit:
+
+ 3a4ac121c2ca ("x86/perf: Add hardware performance events support for Zhaoxin CPU.")
+
+Got the old-style NMI watchdog logic wrong and broke it for basically every
+Intel CPU where it was active. Which is only truly old CPUs, so few people noticed.
+
+On CPUs with perf events support we turn off the old-style NMI watchdog, so it
+was pretty pointless to add the logic for X86_VENDOR_ZHAOXIN to begin with ... :-/
+
+Anyway, the fix is to restore the old logic and add a 'break'.
+
+[ mingo: Wrote a new changelog. ]
+
+Fixes: 3a4ac121c2ca ("x86/perf: Add hardware performance events support for Zhaoxin CPU.")
+Signed-off-by: CodyYao-oc <CodyYao-oc@zhaoxin.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20210607025335.9643-1-CodyYao-oc@zhaoxin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/perfctr-watchdog.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
++++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
+@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_m
+ case 15:
+ return msr - MSR_P4_BPU_PERFCTR0;
+ }
+- fallthrough;
++ break;
+ case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
+ return msr - MSR_ARCH_PERFMON_PERFCTR0;
+@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_m
+ case 15:
+ return msr - MSR_P4_BSU_ESCR0;
+ }
+- fallthrough;
++ break;
+ case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
+ return msr - MSR_ARCH_PERFMON_EVENTSEL0;