--- /dev/null
+From d3eb70ead6474ec16f976fcacf10a7a890a95bd3 Mon Sep 17 00:00:00 2001
+From: Pingfan Liu <kernelfans@gmail.com>
+Date: Fri, 12 Nov 2021 13:22:14 +0800
+Subject: arm64: mm: Fix VM_BUG_ON(mm != &init_mm) for trans_pgd
+
+From: Pingfan Liu <kernelfans@gmail.com>
+
+commit d3eb70ead6474ec16f976fcacf10a7a890a95bd3 upstream.
+
+trans_pgd_create_copy() can hit "VM_BUG_ON(mm != &init_mm)" in the
+function pmd_populate_kernel().
+
+This is the combined consequence of commit 5de59884ac0e ("arm64:
+trans_pgd: pass NULL instead of init_mm to *_populate functions"), which
+replaced &init_mm with NULL and commit 59511cfd08f3 ("arm64: mm: use XN
+table mapping attributes for user/kernel mappings"), which introduced
+the VM_BUG_ON.
+
+Since the former sounds reasonable, it is better to work on the latter.
+From the perspective of trans_pgd, two groups of functions are
+considered in the latter one:
+
+ pmd_populate_kernel()
+ mm == NULL should be fixed, else it hits VM_BUG_ON()
+ p?d_populate()
+ mm == NULL means PXN, that is OK, since trans_pgd only copies a
+ linear map, no execution will happen on the map.
+
+So it is good enough to just relax VM_BUG_ON() to disregard mm == NULL
+
+Fixes: 59511cfd08f3 ("arm64: mm: use XN table mapping attributes for user/kernel mappings")
+Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
+Cc: <stable@vger.kernel.org> # 5.13.x
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: James Morse <james.morse@arm.com>
+Cc: Matthias Brugger <mbrugger@suse.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Link: https://lore.kernel.org/r/20211112052214.9086-1-kernelfans@gmail.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/pgalloc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t
+ static inline void
+ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ {
+- VM_BUG_ON(mm != &init_mm);
++ VM_BUG_ON(mm && mm != &init_mm);
+ __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
+ }
+
--- /dev/null
+From ed38eb49d101e829ae0f8c0a0d3bf5cb6bcbc6b2 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 17 Nov 2021 14:57:31 +0100
+Subject: cpufreq: intel_pstate: Fix active mode offline/online EPP handling
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit ed38eb49d101e829ae0f8c0a0d3bf5cb6bcbc6b2 upstream.
+
+After commit 4adcf2e5829f ("cpufreq: intel_pstate: Add ->offline and
+->online callbacks") the EPP value set by the "performance" scaling
+algorithm in the active mode is not restored after an offline/online
+cycle which replaces it with the saved EPP value coming from user
+space.
+
+Address this issue by forcing intel_pstate_hwp_set() to set a new
+EPP value when it runs first time after online.
+
+Fixes: 4adcf2e5829f ("cpufreq: intel_pstate: Add ->offline and ->online callbacks")
+Link: https://lore.kernel.org/linux-pm/adc7132c8655bd4d1c8b6129578e931a14fe1db2.camel@linux.intel.com/
+Reported-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: 5.9+ <stable@vger.kernel.org> # 5.9+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -999,6 +999,12 @@ static void intel_pstate_hwp_offline(str
+ */
+ value &= ~GENMASK_ULL(31, 24);
+ value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
++ /*
++ * However, make sure that EPP will be set to "performance" when
++ * the CPU is brought back online again and the "performance"
++ * scaling algorithm is still in effect.
++ */
++ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+ }
+
+ /*
--- /dev/null
+From 4d62555f624582e60be416fbc4772cd3fcd12b1a Mon Sep 17 00:00:00 2001
+From: Philip Yang <Philip.Yang@amd.com>
+Date: Fri, 12 Nov 2021 19:05:08 -0500
+Subject: drm/amdgpu: IH process reset count when restart
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Philip Yang <Philip.Yang@amd.com>
+
+commit 4d62555f624582e60be416fbc4772cd3fcd12b1a upstream.
+
+Otherwise, when the IH process restarts, count is zero, and the loop
+will not exit to wake_up_all after processing AMDGPU_IH_MAX_NUM_IVS
+interrupts.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Philip Yang <Philip.Yang@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process
+ */
+ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+ {
+- unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
++ unsigned int count;
+ u32 wptr;
+
+ if (!ih->enabled || adev->shutdown)
+@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_devi
+ wptr = amdgpu_ih_get_wptr(adev, ih);
+
+ restart_ih:
++ count = AMDGPU_IH_MAX_NUM_IVS;
+ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
--- /dev/null
+From d5c7255dc7ff6e1239d794b9c53029d83ced04ca Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 23 Nov 2021 11:36:01 -0500
+Subject: drm/amdgpu/pm: fix powerplay OD interface
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit d5c7255dc7ff6e1239d794b9c53029d83ced04ca upstream.
+
+The overclocking interface currently appends data to a
+string. Revert back to using sprintf().
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1774
+Fixes: 6db0c87a0a8ee1 ("amdgpu/pm: Replace hwmgr smu usage of sprintf with sysfs_emit")
+Acked-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 20 ++----
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 24 +++----
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 6 -
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 28 ++++----
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 10 +--
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 58 ++++++++----------
+ 6 files changed, 67 insertions(+), 79 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(stru
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
+@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(stru
+ else
+ i = 1;
+
+- size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
++ size += sprintf(buf + size, "0: %uMhz %s\n",
+ data->gfx_min_freq_limit/100,
+ i == 0 ? "*" : "");
+- size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
++ size += sprintf(buf + size, "1: %uMhz %s\n",
+ i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+- size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
++ size += sprintf(buf + size, "2: %uMhz %s\n",
+ data->gfx_max_freq_limit/100,
+ i == 2 ? "*" : "");
+ break;
+@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(stru
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i,
+ mclk_table->entries[i].clk / 100,
+ ((mclk_table->entries[i].clk / 100)
+@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(stru
+ if (ret)
+ return ret;
+
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "0: %10uMhz\n",
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
+ }
+ break;
+@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(stru
+ if (ret)
+ return ret;
+
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ min_freq, max_freq);
+ }
+ break;
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struc
+ int size = 0;
+ uint32_t i, now, clock, pcie_speed;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
+@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struc
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struc
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struc
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
+@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struc
+ break;
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ for (i = 0; i < odn_sclk_table->num_of_pl; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_sclk_table->entries[i].clock/100,
+ odn_sclk_table->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ for (i = 0; i < odn_mclk_table->num_of_pl; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_mclk_table->entries[i].clock/100,
+ odn_mclk_table->entries[i].vddc);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
++ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struc
+ uint32_t i, now;
+ int size = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struc
+ CURR_SCLK_INDEX);
+
+ for (i = 0; i < sclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->entries[i].clk / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struc
+ CURR_MCLK_INDEX);
+
+ for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
+ (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
+ break;
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(str
+
+ int i, now, size = 0, count = 0;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ if (data->registry_data.sclk_dpm_key_disabled)
+@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(str
+ else
+ count = sclk_table->count;
+ for (i = 0; i < count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(str
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
+
+ for (i = 0; i < mclk_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(str
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
+
+ for (i = 0; i < soc_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, soc_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(str
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
+
+ for (i = 0; i < dcef_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, dcef_table->dpm_levels[i].value / 100,
+ (dcef_table->dpm_levels[i].value / 100 == now) ?
+ "*" : "");
+@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(str
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+- size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(str
+
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk / 100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
++ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk/100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
++ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
++ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(str
+ int i, now, size = 0;
+ struct pp_clock_levels_with_latency clocks;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ PP_ASSERT_WITH_CODE(
+@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(str
+ "Attempt to get gfx clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(str
+ "Attempt to get memory clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
+ break;
+@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(str
+ "Attempt to get soc clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(str
+ "Attempt to get dcef clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
+ break;
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(str
+ int ret = 0;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+
+- phm_get_sysfs_buf(&buf, &size);
+-
+ switch (type) {
+ case PP_SCLK:
+ ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
+@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(str
+ return ret);
+
+ if (vega20_get_sclks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(str
+ return ret);
+
+ if (vega20_get_memclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(str
+ return ret);
+
+ if (vega20_get_socclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(str
+ return ret);
+
+ for (i = 0; i < fclk_dpm_table->count; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, fclk_dpm_table->dpm_levels[i].value,
+ fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
+ break;
+@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(str
+ return ret);
+
+ if (vega20_get_dcefclocks(hwmgr, &clocks)) {
+- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
++ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
++ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(str
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+- size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
++ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(str
+ case OD_SCLK:
+ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
++ size += sprintf(buf + size, "0: %10uMhz\n",
+ od_table->GfxclkFmin);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->GfxclkFmax);
+ }
+ break;
+
+ case OD_MCLK:
+ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
++ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
++ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->UclkFmax);
+ }
+
+@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(str
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
+- size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
++ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+- size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+- size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
++ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(str
+ break;
+
+ case OD_RANGE:
+- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
++ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+
+ if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
+ }
+
+ if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
+- size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
+ od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(str
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
++ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
+- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
++ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
+ od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
+ }
--- /dev/null
+From 46741e4f593ff1bd0e4a140ab7e566701946484b Mon Sep 17 00:00:00 2001
+From: Ben Skeggs <bskeggs@redhat.com>
+Date: Thu, 18 Nov 2021 13:04:13 +1000
+Subject: drm/nouveau: recognise GA106
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+commit 46741e4f593ff1bd0e4a140ab7e566701946484b upstream.
+
+I've got HW now, appears to work as expected so far.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Cc: <stable@vger.kernel.org> # 5.14+
+Reviewed-by: Karol Herbst <kherbst@redhat.com>
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20211118030413.2610-1-skeggsb@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -2627,6 +2627,27 @@ nv174_chipset = {
+ };
+
+ static const struct nvkm_device_chip
++nv176_chipset = {
++ .name = "GA106",
++ .bar = { 0x00000001, tu102_bar_new },
++ .bios = { 0x00000001, nvkm_bios_new },
++ .devinit = { 0x00000001, ga100_devinit_new },
++ .fb = { 0x00000001, ga102_fb_new },
++ .gpio = { 0x00000001, ga102_gpio_new },
++ .i2c = { 0x00000001, gm200_i2c_new },
++ .imem = { 0x00000001, nv50_instmem_new },
++ .mc = { 0x00000001, ga100_mc_new },
++ .mmu = { 0x00000001, tu102_mmu_new },
++ .pci = { 0x00000001, gp100_pci_new },
++ .privring = { 0x00000001, gm200_privring_new },
++ .timer = { 0x00000001, gk20a_timer_new },
++ .top = { 0x00000001, ga100_top_new },
++ .disp = { 0x00000001, ga102_disp_new },
++ .dma = { 0x00000001, gv100_dma_new },
++ .fifo = { 0x00000001, ga102_fifo_new },
++};
++
++static const struct nvkm_device_chip
+ nv177_chipset = {
+ .name = "GA107",
+ .bar = { 0x00000001, tu102_bar_new },
+@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_devic
+ case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
++ case 0x176: device->chip = &nv176_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
+ default:
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
--- /dev/null
+From 674ee8e1b4a41d2fdffc885c55350c3fbb38c22a Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 23 Nov 2021 01:45:35 +0000
+Subject: io_uring: correct link-list traversal locking
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 674ee8e1b4a41d2fdffc885c55350c3fbb38c22a upstream.
+
+As io_remove_next_linked() is now under ->timeout_lock (see
+io_link_timeout_fn), we should update locking around io_for_each_link()
+and io_match_task() to use the new lock.
+
+Cc: stable@kernel.org # 5.15+
+Fixes: 89850fce16a1a ("io_uring: run timeouts from task_work")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/b54541cedf7de59cb5ae36109e58529ca16e66aa.1637631883.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1430,10 +1430,10 @@ static void io_prep_async_link(struct io
+ if (req->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+- spin_lock(&ctx->completion_lock);
++ spin_lock_irq(&ctx->timeout_lock);
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+- spin_unlock(&ctx->completion_lock);
++ spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ io_for_each_link(cur, req)
+ io_prep_async_work(cur);
+@@ -5697,6 +5697,7 @@ static bool io_poll_remove_all(struct io
+ int posted = 0, i;
+
+ spin_lock(&ctx->completion_lock);
++ spin_lock_irq(&ctx->timeout_lock);
+ for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+ struct hlist_head *list;
+
+@@ -5706,6 +5707,7 @@ static bool io_poll_remove_all(struct io
+ posted += io_poll_remove_one(req);
+ }
+ }
++ spin_unlock_irq(&ctx->timeout_lock);
+ spin_unlock(&ctx->completion_lock);
+
+ if (posted)
+@@ -9523,9 +9525,9 @@ static bool io_cancel_task_cb(struct io_
+ struct io_ring_ctx *ctx = req->ctx;
+
+ /* protect against races with linked timeouts */
+- spin_lock(&ctx->completion_lock);
++ spin_lock_irq(&ctx->timeout_lock);
+ ret = io_match_task(req, cancel->task, cancel->all);
+- spin_unlock(&ctx->completion_lock);
++ spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ ret = io_match_task(req, cancel->task, cancel->all);
+ }
+@@ -9539,12 +9541,14 @@ static bool io_cancel_defer_files(struct
+ LIST_HEAD(list);
+
+ spin_lock(&ctx->completion_lock);
++ spin_lock_irq(&ctx->timeout_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_task(de->req, task, cancel_all)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
++ spin_unlock_irq(&ctx->timeout_lock);
+ spin_unlock(&ctx->completion_lock);
+ if (list_empty(&list))
+ return false;
--- /dev/null
+From 617a89484debcd4e7999796d693cf0b77d2519de Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 26 Nov 2021 14:38:14 +0000
+Subject: io_uring: fail cancellation for EXITING tasks
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 617a89484debcd4e7999796d693cf0b77d2519de upstream.
+
+WARNING: CPU: 1 PID: 20 at fs/io_uring.c:6269 io_try_cancel_userdata+0x3c5/0x640 fs/io_uring.c:6269
+CPU: 1 PID: 20 Comm: kworker/1:0 Not tainted 5.16.0-rc1-syzkaller #0
+Workqueue: events io_fallback_req_func
+RIP: 0010:io_try_cancel_userdata+0x3c5/0x640 fs/io_uring.c:6269
+Call Trace:
+ <TASK>
+ io_req_task_link_timeout+0x6b/0x1e0 fs/io_uring.c:6886
+ io_fallback_req_func+0xf9/0x1ae fs/io_uring.c:1334
+ process_one_work+0x9b2/0x1690 kernel/workqueue.c:2298
+ worker_thread+0x658/0x11f0 kernel/workqueue.c:2445
+ kthread+0x405/0x4f0 kernel/kthread.c:327
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295
+ </TASK>
+
+We need original task's context to do cancellations, so if it's dying
+and the callback is executed in a fallback mode, fail the cancellation
+attempt.
+
+Fixes: 89b263f6d56e6 ("io_uring: run linked timeouts from task_work")
+Cc: stable@kernel.org # 5.15+
+Reported-by: syzbot+ab0cfe96c2b3cd1c1153@syzkaller.appspotmail.com
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/4c41c5f379c6941ad5a07cd48cb66ed62199cf7e.1637937097.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6886,10 +6886,11 @@ static inline struct file *io_file_get(s
+ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+ {
+ struct io_kiocb *prev = req->timeout.prev;
+- int ret;
++ int ret = -ENOENT;
+
+ if (prev) {
+- ret = io_try_cancel_userdata(req, prev->user_data);
++ if (!(req->task->flags & PF_EXITING))
++ ret = io_try_cancel_userdata(req, prev->user_data);
+ io_req_complete_post(req, ret ?: -ETIME, 0);
+ io_put_req(prev);
+ } else {
--- /dev/null
+From 6af3f48bf6156a7f02e91aca64e2927c4bebda03 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 26 Nov 2021 14:38:15 +0000
+Subject: io_uring: fix link traversal locking
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 6af3f48bf6156a7f02e91aca64e2927c4bebda03 upstream.
+
+WARNING: inconsistent lock state
+5.16.0-rc2-syzkaller #0 Not tainted
+inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
+ffff888078e11418 (&ctx->timeout_lock
+){?.+.}-{2:2}
+, at: io_timeout_fn+0x6f/0x360 fs/io_uring.c:5943
+{HARDIRQ-ON-W} state was registered at:
+ [...]
+ spin_unlock_irq include/linux/spinlock.h:399 [inline]
+ __io_poll_remove_one fs/io_uring.c:5669 [inline]
+ __io_poll_remove_one fs/io_uring.c:5654 [inline]
+ io_poll_remove_one+0x236/0x870 fs/io_uring.c:5680
+ io_poll_remove_all+0x1af/0x235 fs/io_uring.c:5709
+ io_ring_ctx_wait_and_kill+0x1cc/0x322 fs/io_uring.c:9534
+ io_uring_release+0x42/0x46 fs/io_uring.c:9554
+ __fput+0x286/0x9f0 fs/file_table.c:280
+ task_work_run+0xdd/0x1a0 kernel/task_work.c:164
+ exit_task_work include/linux/task_work.h:32 [inline]
+ do_exit+0xc14/0x2b40 kernel/exit.c:832
+
+674ee8e1b4a41 ("io_uring: correct link-list traversal locking") fixed a
+data race but introduced a possible deadlock and inconsistency in irq
+states. E.g.
+
+io_poll_remove_all()
+ spin_lock_irq(timeout_lock)
+ io_poll_remove_one()
+ spin_lock/unlock_irq(poll_lock);
+ spin_unlock_irq(timeout_lock)
+
+Another type of problem is freeing a request while holding
+->timeout_lock, which may lead to a deadlock in
+io_commit_cqring() -> io_flush_timeouts() and other places.
+
+Having 3 nested locks is also too ugly. Add io_match_task_safe(), which
+would briefly take and release timeout_lock for race prevention inside,
+so the actual request cancellation / free / etc. code doesn't have it
+taken.
+
+Reported-by: syzbot+ff49a3059d49b0ca0eec@syzkaller.appspotmail.com
+Reported-by: syzbot+847f02ec20a6609a328b@syzkaller.appspotmail.com
+Reported-by: syzbot+3368aadcd30425ceb53b@syzkaller.appspotmail.com
+Reported-by: syzbot+51ce8887cdef77c9ac83@syzkaller.appspotmail.com
+Reported-by: syzbot+3cb756a49d2f394a9ee3@syzkaller.appspotmail.com
+Fixes: 674ee8e1b4a41 ("io_uring: correct link-list traversal locking")
+Cc: stable@kernel.org # 5.15+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/397f7ebf3f4171f1abe41f708ac1ecb5766f0b68.1637937097.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 60 ++++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 42 insertions(+), 18 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1204,6 +1204,7 @@ static void io_refs_resurrect(struct per
+
+ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all)
++ __must_hold(&req->ctx->timeout_lock)
+ {
+ struct io_kiocb *req;
+
+@@ -1219,6 +1220,44 @@ static bool io_match_task(struct io_kioc
+ return false;
+ }
+
++static bool io_match_linked(struct io_kiocb *head)
++{
++ struct io_kiocb *req;
++
++ io_for_each_link(req, head) {
++ if (req->flags & REQ_F_INFLIGHT)
++ return true;
++ }
++ return false;
++}
++
++/*
++ * As io_match_task() but protected against racing with linked timeouts.
++ * User must not hold timeout_lock.
++ */
++static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
++ bool cancel_all)
++{
++ bool matched;
++
++ if (task && head->task != task)
++ return false;
++ if (cancel_all)
++ return true;
++
++ if (head->flags & REQ_F_LINK_TIMEOUT) {
++ struct io_ring_ctx *ctx = head->ctx;
++
++ /* protect against races with linked timeouts */
++ spin_lock_irq(&ctx->timeout_lock);
++ matched = io_match_linked(head);
++ spin_unlock_irq(&ctx->timeout_lock);
++ } else {
++ matched = io_match_linked(head);
++ }
++ return matched;
++}
++
+ static inline void req_set_fail(struct io_kiocb *req)
+ {
+ req->flags |= REQ_F_FAIL;
+@@ -5697,17 +5736,15 @@ static bool io_poll_remove_all(struct io
+ int posted = 0, i;
+
+ spin_lock(&ctx->completion_lock);
+- spin_lock_irq(&ctx->timeout_lock);
+ for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+ struct hlist_head *list;
+
+ list = &ctx->cancel_hash[i];
+ hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+- if (io_match_task(req, tsk, cancel_all))
++ if (io_match_task_safe(req, tsk, cancel_all))
+ posted += io_poll_remove_one(req);
+ }
+ }
+- spin_unlock_irq(&ctx->timeout_lock);
+ spin_unlock(&ctx->completion_lock);
+
+ if (posted)
+@@ -9520,19 +9557,8 @@ static bool io_cancel_task_cb(struct io_
+ {
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_task_cancel *cancel = data;
+- bool ret;
+
+- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
+- struct io_ring_ctx *ctx = req->ctx;
+-
+- /* protect against races with linked timeouts */
+- spin_lock_irq(&ctx->timeout_lock);
+- ret = io_match_task(req, cancel->task, cancel->all);
+- spin_unlock_irq(&ctx->timeout_lock);
+- } else {
+- ret = io_match_task(req, cancel->task, cancel->all);
+- }
+- return ret;
++ return io_match_task_safe(req, cancel->task, cancel->all);
+ }
+
+ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+@@ -9542,14 +9568,12 @@ static bool io_cancel_defer_files(struct
+ LIST_HEAD(list);
+
+ spin_lock(&ctx->completion_lock);
+- spin_lock_irq(&ctx->timeout_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+- if (io_match_task(de->req, task, cancel_all)) {
++ if (io_match_task_safe(de->req, task, cancel_all)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+- spin_unlock_irq(&ctx->timeout_lock);
+ spin_unlock(&ctx->completion_lock);
+ if (list_empty(&list))
+ return false;
--- /dev/null
+From d8af404ffce71448f29bbc19a05e3d095baf98eb Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Wed, 17 Nov 2021 17:59:01 -0800
+Subject: iomap: Fix inline extent handling in iomap_readpage
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit d8af404ffce71448f29bbc19a05e3d095baf98eb upstream.
+
+Before commit 740499c78408 ("iomap: fix the iomap_readpage_actor return
+value for inline data"), when hitting an IOMAP_INLINE extent,
+iomap_readpage_actor would report having read the entire page. Since
+then, it only reports having read the inline data (iomap->length).
+
+This will force iomap_readpage into another iteration, and the
+filesystem will report an unaligned hole after the IOMAP_INLINE extent.
+But iomap_readpage_actor (now iomap_readpage_iter) isn't prepared to
+deal with unaligned extents, it will get things wrong on filesystems
+with a block size smaller than the page size, and we'll eventually run
+into the following warning in iomap_iter_advance:
+
+ WARN_ON_ONCE(iter->processed > iomap_length(iter));
+
+Fix that by changing iomap_readpage_iter to return 0 when hitting an
+inline extent; this will cause iomap_iter to stop immediately.
+
+To fix readahead as well, change iomap_readahead_iter to pass on
+iomap_readpage_iter return values less than or equal to zero.
+
+Fixes: 740499c78408 ("iomap: fix the iomap_readpage_actor return value for inline data")
+Cc: stable@vger.kernel.org # v5.15+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/iomap/buffered-io.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -256,8 +256,13 @@ static loff_t iomap_readpage_iter(const
+ unsigned poff, plen;
+ sector_t sector;
+
+- if (iomap->type == IOMAP_INLINE)
+- return min(iomap_read_inline_data(iter, page), length);
++ if (iomap->type == IOMAP_INLINE) {
++ loff_t ret = iomap_read_inline_data(iter, page);
++
++ if (ret < 0)
++ return ret;
++ return 0;
++ }
+
+ /* zero post-eof blocks as the page may be mapped */
+ iop = iomap_page_create(iter->inode, page);
+@@ -370,6 +375,8 @@ static loff_t iomap_readahead_iter(const
+ ctx->cur_page_in_bio = false;
+ }
+ ret = iomap_readpage_iter(iter, ctx, done);
++ if (ret <= 0)
++ return ret;
+ }
+
+ return done;
--- /dev/null
+From 1ec72153ff434ce75bace3044dc89a23a05d7064 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sun, 21 Nov 2021 11:32:39 +0900
+Subject: ksmbd: contain default data stream even if xattr is empty
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 1ec72153ff434ce75bace3044dc89a23a05d7064 upstream.
+
+If xattr is not supported like exfat or fat, ksmbd server doesn't
+contain the default data stream in the FILE_STREAM_INFORMATION response.
+It will cause ppt or doc file update issues if the local filesystem is
+one of those. This patch moves the goto statement to include it.
+
+Fixes: 9f6323311c70 ("ksmbd: add default data stream name in FILE_STREAM_INFORMATION")
+Cc: stable@vger.kernel.org # v5.15
+Acked-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -4450,6 +4450,12 @@ static void get_file_stream_info(struct
+ &stat);
+ file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+
++ buf_free_len =
++ smb2_calc_max_out_buf_len(work, 8,
++ le32_to_cpu(req->OutputBufferLength));
++ if (buf_free_len < 0)
++ goto out;
++
+ xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+ if (xattr_list_len < 0) {
+ goto out;
+@@ -4458,12 +4464,6 @@ static void get_file_stream_info(struct
+ goto out;
+ }
+
+- buf_free_len =
+- smb2_calc_max_out_buf_len(work, 8,
+- le32_to_cpu(req->OutputBufferLength));
+- if (buf_free_len < 0)
+- goto out;
+-
+ while (idx < xattr_list_len) {
+ stream_name = xattr_list + idx;
+ streamlen = strlen(stream_name);
+@@ -4507,6 +4507,7 @@ static void get_file_stream_info(struct
+ file_info->NextEntryOffset = cpu_to_le32(next);
+ }
+
++out:
+ if (!S_ISDIR(stat.mode) &&
+ buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
+ file_info = (struct smb2_file_stream_info *)
+@@ -4515,14 +4516,13 @@ static void get_file_stream_info(struct
+ "::$DATA", 7, conn->local_nls, 0);
+ streamlen *= 2;
+ file_info->StreamNameLength = cpu_to_le32(streamlen);
+- file_info->StreamSize = 0;
+- file_info->StreamAllocationSize = 0;
++ file_info->StreamSize = cpu_to_le64(stat.size);
++ file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
+ nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
+ }
+
+ /* last entry offset should be 0 */
+ file_info->NextEntryOffset = 0;
+-out:
+ kvfree(xattr_list);
+
+ rsp->OutputBufferLength = cpu_to_le32(nbytes);
--- /dev/null
+From 8e537d1465e7401f352a6e0a728a93f8cad5294a Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sun, 21 Nov 2021 07:48:45 +0900
+Subject: ksmbd: downgrade addition info error msg to debug in smb2_get_info_sec()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 8e537d1465e7401f352a6e0a728a93f8cad5294a upstream.
+
+During file transfer through a windows client, this error message floods
+the log. This flood will cause performance degradation and the
+misunderstanding that the server has a problem.
+
+Fixes: e294f78d3478 ("ksmbd: allow PROTECTED_DACL_SECINFO and UNPROTECTED_DACL_SECINFO addition information in smb2 set info security")
+Cc: stable@vger.kernel.org # v5.15
+Acked-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -5060,7 +5060,7 @@ static int smb2_get_info_sec(struct ksmb
+ if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
+ PROTECTED_DACL_SECINFO |
+ UNPROTECTED_DACL_SECINFO)) {
+- pr_err("Unsupported addition info: 0x%x)\n",
++ ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
+ addition_info);
+
+ pntsd->revision = cpu_to_le16(1);
--- /dev/null
+From 178ca6f85aa3231094467691f5ea1ff2f398aa8d Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 24 Nov 2021 10:23:02 +0900
+Subject: ksmbd: fix memleak in get_file_stream_info()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 178ca6f85aa3231094467691f5ea1ff2f398aa8d upstream.
+
+Fix memleak in get_file_stream_info()
+
+Fixes: 34061d6b76a4 ("ksmbd: validate OutputBufferLength of QUERY_DIR, QUERY_INFO, IOCTL requests")
+Cc: stable@vger.kernel.org # v5.15
+Reported-by: Coverity Scan <scan-admin@coverity.com>
+Acked-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -4489,8 +4489,10 @@ static void get_file_stream_info(struct
+ ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
+
+ next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
+- if (next > buf_free_len)
++ if (next > buf_free_len) {
++ kfree(stream_buf);
+ break;
++ }
+
+ file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
+ streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
--- /dev/null
+From cf0b0e3712f7af90006f8317ff27278094c2c128 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 19 Nov 2021 13:16:27 +1000
+Subject: KVM: PPC: Book3S HV: Prevent POWER7/8 TLB flush flushing SLB
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit cf0b0e3712f7af90006f8317ff27278094c2c128 upstream.
+
+The POWER9 ERAT flush instruction is a SLBIA with IH=7, which is a
+reserved value on POWER7/8. On POWER8 this invalidates the SLB entries
+above index 0, similarly to SLBIA IH=0.
+
+If the SLB entries are invalidated, and then the guest is bypassed, the
+host SLB does not get re-loaded, so the bolted entries above 0 will be
+lost. This can result in kernel stack access causing a SLB fault.
+
+Kernel stack access causing a SLB fault was responsible for the infamous
+mega bug (search "Fix SLB reload bug"). Although since commit
+48e7b7695745 ("powerpc/64s/hash: Convert SLB miss handlers to C") that
+starts using the kernel stack in the SLB miss handler, it might only
+result in an infinite loop of SLB faults. In any case it's a bug.
+
+Fix this by only executing the instruction on >= POWER9 where IH=7 is
+defined not to invalidate the SLB. POWER7/8 don't require this ERAT
+flush.
+
+Fixes: 500871125920 ("KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20211119031627.577853-1-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_hv_builtin.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
+@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *
+ "r" (0) : "memory");
+ }
+ asm volatile("ptesync": : :"memory");
++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ asm volatile("ptesync": : :"memory");
+- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ }
+ }
+
--- /dev/null
+From 9dbe33cf371bd70330858370bdbc35c7668f00c3 Mon Sep 17 00:00:00 2001
+From: Dylan Hung <dylan_hung@aspeedtech.com>
+Date: Thu, 25 Nov 2021 10:44:32 +0800
+Subject: mdio: aspeed: Fix "Link is Down" issue
+
+From: Dylan Hung <dylan_hung@aspeedtech.com>
+
+commit 9dbe33cf371bd70330858370bdbc35c7668f00c3 upstream.
+
+The issue happened randomly in runtime. The message "Link is Down" is
+popped but soon it recovered to "Link is Up".
+
+The "Link is Down" results from the incorrect read data for reading the
+PHY register via MDIO bus. The correct sequence for reading the data
+shall be:
+1. fire the command
+2. wait for command done (this step was missing)
+3. wait for data idle
+4. read data from data register
+
+Cc: stable@vger.kernel.org
+Fixes: f160e99462c6 ("net: phy: Add mdio-aspeed")
+Reviewed-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Dylan Hung <dylan_hung@aspeedtech.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://lore.kernel.org/r/20211125024432.15809-1-dylan_hung@aspeedtech.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/mdio/mdio-aspeed.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/mdio/mdio-aspeed.c
++++ b/drivers/net/mdio/mdio-aspeed.c
+@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_b
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+
++ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
++ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
++ ASPEED_MDIO_INTERVAL_US,
++ ASPEED_MDIO_TIMEOUT_US);
++ if (rc < 0)
++ return rc;
++
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
+ data & ASPEED_MDIO_DATA_IDLE,
+ ASPEED_MDIO_INTERVAL_US,
--- /dev/null
+From adab993c25191b839b415781bdc7173a77315240 Mon Sep 17 00:00:00 2001
+From: Tim Harvey <tharvey@gateworks.com>
+Date: Wed, 3 Nov 2021 09:54:15 -0700
+Subject: mmc: sdhci-esdhc-imx: disable CMDQ support
+
+From: Tim Harvey <tharvey@gateworks.com>
+
+commit adab993c25191b839b415781bdc7173a77315240 upstream.
+
+On IMX SoC's which support CMDQ the following can occur during a high
+cpu load:
+
+mmc2: cqhci: ============ CQHCI REGISTER DUMP ===========
+mmc2: cqhci: Caps: 0x0000310a | Version: 0x00000510
+mmc2: cqhci: Config: 0x00001001 | Control: 0x00000000
+mmc2: cqhci: Int stat: 0x00000000 | Int enab: 0x00000006
+mmc2: cqhci: Int sig: 0x00000006 | Int Coal: 0x00000000
+mmc2: cqhci: TDL base: 0x8003f000 | TDL up32: 0x00000000
+mmc2: cqhci: Doorbell: 0xbf01dfff | TCN: 0x00000000
+mmc2: cqhci: Dev queue: 0x00000000 | Dev Pend: 0x08000000
+mmc2: cqhci: Task clr: 0x00000000 | SSC1: 0x00011000
+mmc2: cqhci: SSC2: 0x00000001 | DCMD rsp: 0x00000800
+mmc2: cqhci: RED mask: 0xfdf9a080 | TERRI: 0x00000000
+mmc2: cqhci: Resp idx: 0x0000000d | Resp arg: 0x00000000
+mmc2: sdhci: ============ SDHCI REGISTER DUMP ===========
+mmc2: sdhci: Sys addr: 0x7c722000 | Version: 0x00000002
+mmc2: sdhci: Blk size: 0x00000200 | Blk cnt: 0x00000020
+mmc2: sdhci: Argument: 0x00018000 | Trn mode: 0x00000023
+mmc2: sdhci: Present: 0x01f88008 | Host ctl: 0x00000030
+mmc2: sdhci: Power: 0x00000002 | Blk gap: 0x00000080
+mmc2: sdhci: Wake-up: 0x00000008 | Clock: 0x0000000f
+mmc2: sdhci: Timeout: 0x0000008f | Int stat: 0x00000000
+mmc2: sdhci: Int enab: 0x107f4000 | Sig enab: 0x107f4000
+mmc2: sdhci: ACmd stat: 0x00000000 | Slot int: 0x00000502
+mmc2: sdhci: Caps: 0x07eb0000 | Caps_1: 0x8000b407
+mmc2: sdhci: Cmd: 0x00000d1a | Max curr: 0x00ffffff
+mmc2: sdhci: Resp[0]: 0x00000000 | Resp[1]: 0xffc003ff
+mmc2: sdhci: Resp[2]: 0x328f5903 | Resp[3]: 0x00d07f01
+mmc2: sdhci: Host ctl2: 0x00000088
+mmc2: sdhci: ADMA Err: 0x00000000 | ADMA Ptr: 0xfe179020
+mmc2: sdhci-esdhc-imx: ========= ESDHC IMX DEBUG STATUS DUMP ====
+mmc2: sdhci-esdhc-imx: cmd debug status: 0x2120
+mmc2: sdhci-esdhc-imx: data debug status: 0x2200
+mmc2: sdhci-esdhc-imx: trans debug status: 0x2300
+mmc2: sdhci-esdhc-imx: dma debug status: 0x2400
+mmc2: sdhci-esdhc-imx: adma debug status: 0x2510
+mmc2: sdhci-esdhc-imx: fifo debug status: 0x2680
+mmc2: sdhci-esdhc-imx: async fifo debug status: 0x2750
+mmc2: sdhci: ============================================
+
+For now, disable CMDQ support on the imx8qm/imx8qxp/imx8mm until the
+issue is found and resolved.
+
+Fixes: bb6e358169bf6 ("mmc: sdhci-esdhc-imx: add CMDQ support")
+Fixes: cde5e8e9ff146 ("mmc: sdhci-esdhc-imx: Add an new esdhc_soc_data for i.MX8MM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+Reviewed-by: Haibo Chen <haibo.chen@nxp.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20211103165415.2016-1-tharvey@gateworks.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-esdhc-imx.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -300,7 +300,6 @@ static struct esdhc_soc_data usdhc_imx8q
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+- | ESDHC_FLAG_CQHCI
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE
+ | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
+ };
+@@ -309,7 +308,6 @@ static struct esdhc_soc_data usdhc_imx8m
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+- | ESDHC_FLAG_CQHCI
+ | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
+ };
+
--- /dev/null
+From 3d7c194b7c9ad414264935ad4f943a6ce285ebb1 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Mon, 15 Nov 2021 10:23:45 +0200
+Subject: mmc: sdhci: Fix ADMA for PAGE_SIZE >= 64KiB
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 3d7c194b7c9ad414264935ad4f943a6ce285ebb1 upstream.
+
+The block layer forces a minimum segment size of PAGE_SIZE, so a segment
+can be too big for the ADMA table, if PAGE_SIZE >= 64KiB. Fix by writing
+multiple descriptors, noting that the ADMA table is sized for 4KiB chunks
+anyway, so it will be big enough.
+
+Reported-and-tested-by: Bough Chen <haibo.chen@nxp.com>
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211115082345.802238-1-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci.c | 21 ++++++++++++++++++---
+ drivers/mmc/host/sdhci.h | 4 +++-
+ 2 files changed, 21 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct
+ len -= offset;
+ }
+
+- BUG_ON(len > 65536);
++ /*
++ * The block layer forces a minimum segment size of PAGE_SIZE,
++ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
++ * multiple descriptors, noting that the ADMA table is sized
++ * for 4KiB chunks anyway, so it will be big enough.
++ */
++ while (len > host->max_adma) {
++ int n = 32 * 1024; /* 32KiB*/
++
++ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
++ addr += n;
++ len -= n;
++ }
+
+ /* tran, valid */
+ if (len)
+@@ -3952,6 +3964,7 @@ struct sdhci_host *sdhci_alloc_host(stru
+ * descriptor for each segment, plus 1 for a nop end descriptor.
+ */
+ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
++ host->max_adma = 65536;
+
+ host->max_timeout_count = 0xE;
+
+@@ -4617,10 +4630,12 @@ int sdhci_setup_host(struct sdhci_host *
+ * be larger than 64 KiB though.
+ */
+ if (host->flags & SDHCI_USE_ADMA) {
+- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
++ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
++ host->max_adma = 65532; /* 32-bit alignment */
+ mmc->max_seg_size = 65535;
+- else
++ } else {
+ mmc->max_seg_size = 65536;
++ }
+ } else {
+ mmc->max_seg_size = mmc->max_req_size;
+ }
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
+
+ /*
+ * Maximum segments assuming a 512KiB maximum requisition size and a minimum
+- * 4KiB page size.
++ * 4KiB page size. Note this also allows enough for multiple descriptors in
++ * case of PAGE_SIZE >= 64KiB.
+ */
+ #define SDHCI_MAX_SEGS 128
+
+@@ -543,6 +544,7 @@ struct sdhci_host {
+ unsigned int blocks; /* remaining PIO blocks */
+
+ int sg_count; /* Mapped sg entries */
++ int max_adma; /* Max. length in ADMA descriptor */
+
+ void *adma_table; /* ADMA descriptor table */
+ void *align_buffer; /* Bounce buffer */
--- /dev/null
+From 3f015d89a47cd8855cd92f71fff770095bd885a1 Mon Sep 17 00:00:00 2001
+From: Benjamin Coddington <bcodding@redhat.com>
+Date: Tue, 16 Nov 2021 10:48:13 -0500
+Subject: NFSv42: Fix pagecache invalidation after COPY/CLONE
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+commit 3f015d89a47cd8855cd92f71fff770095bd885a1 upstream.
+
+The mechanism in use to allow the client to see the results of COPY/CLONE
+is to drop those pages from the pagecache. This forces the client to read
+those pages once more from the server. However, truncate_pagecache_range()
+zeros out partial pages instead of dropping them. Let us instead use
+invalidate_inode_pages2_range() with full-page offsets to ensure the client
+properly sees the results of COPY/CLONE operations.
+
+Cc: <stable@vger.kernel.org> # v4.7+
+Fixes: 2e72448b07dc ("NFS: Add COPY nfs operation")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs42proc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -285,7 +285,9 @@ static void nfs42_copy_dest_done(struct
+ loff_t newsize = pos + len;
+ loff_t end = newsize - 1;
+
+- truncate_pagecache_range(inode, pos, end);
++ WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
++ pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
++
+ spin_lock(&inode->i_lock);
+ if (newsize > i_size_read(inode))
+ i_size_write(inode, newsize);
--- /dev/null
+From 5bb60ea611db1e04814426ed4bd1c95d1487678e Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Thu, 18 Nov 2021 10:39:53 +0100
+Subject: powerpc/32: Fix hardlockup on vmap stack overflow
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 5bb60ea611db1e04814426ed4bd1c95d1487678e upstream.
+
+Since the commit c118c7303ad5 ("powerpc/32: Fix vmap stack - Do not
+activate MMU before reading task struct") a vmap stack overflow
+results in a hard lockup. This is because emergency_ctx is still
+addressed with its virtual address although the data MMU is not active
+anymore at that time.
+
+Fix it by using a physical address instead.
+
+Fixes: c118c7303ad5 ("powerpc/32: Fix vmap stack - Do not activate MMU before reading task struct")
+Cc: stable@vger.kernel.org # v5.10+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/ce30364fb7ccda489272af4a1612b6aa147e1d23.1637227521.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/head_32.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -202,11 +202,11 @@ vmap_stack_overflow:
+ mfspr r1, SPRN_SPRG_THREAD
+ lwz r1, TASK_CPU - THREAD(r1)
+ slwi r1, r1, 3
+- addis r1, r1, emergency_ctx@ha
++ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
+ #else
+- lis r1, emergency_ctx@ha
++ lis r1, emergency_ctx-PAGE_OFFSET@ha
+ #endif
+- lwz r1, emergency_ctx@l(r1)
++ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ EXCEPTION_PROLOG_2 0 vmap_stack_overflow
+ prepare_transfer_to_handler
staging-r8188eu-use-gfp_atomic-under-spinlock.patch
staging-r8188eu-fix-a-memory-leak-in-rtw_wx_read32.patch
fuse-release-pipe-buf-after-last-use.patch
+xen-don-t-continue-xenstore-initialization-in-case-of-errors.patch
+xen-detect-uninitialized-xenbus-in-xenbus_init.patch
+io_uring-correct-link-list-traversal-locking.patch
+io_uring-fail-cancellation-for-exiting-tasks.patch
+io_uring-fix-link-traversal-locking.patch
+drm-amdgpu-ih-process-reset-count-when-restart.patch
+drm-amdgpu-pm-fix-powerplay-od-interface.patch
+drm-nouveau-recognise-ga106.patch
+ksmbd-downgrade-addition-info-error-msg-to-debug-in-smb2_get_info_sec.patch
+ksmbd-contain-default-data-stream-even-if-xattr-is-empty.patch
+ksmbd-fix-memleak-in-get_file_stream_info.patch
+kvm-ppc-book3s-hv-prevent-power7-8-tlb-flush-flushing-slb.patch
+tracing-uprobe-fix-uprobe_perf_open-probes-iteration.patch
+tracing-fix-pid-filtering-when-triggers-are-attached.patch
+mmc-sdhci-esdhc-imx-disable-cmdq-support.patch
+mmc-sdhci-fix-adma-for-page_size-64kib.patch
+mdio-aspeed-fix-link-is-down-issue.patch
+arm64-mm-fix-vm_bug_on-mm-init_mm-for-trans_pgd.patch
+cpufreq-intel_pstate-fix-active-mode-offline-online-epp-handling.patch
+powerpc-32-fix-hardlockup-on-vmap-stack-overflow.patch
+iomap-fix-inline-extent-handling-in-iomap_readpage.patch
+nfsv42-fix-pagecache-invalidation-after-copy-clone.patch
--- /dev/null
+From a55f224ff5f238013de8762c4287117e47b86e22 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 26 Nov 2021 17:34:42 -0500
+Subject: tracing: Fix pid filtering when triggers are attached
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit a55f224ff5f238013de8762c4287117e47b86e22 upstream.
+
+If an event is filtered by pid and a trigger that requires processing of
+the event to happen is attached to the event, the discard portion does
+not take the pid filtering into account, and the event will then be
+recorded when it should not have been.
+
+Cc: stable@vger.kernel.org
+Fixes: 3fdaf80f4a836 ("tracing: Implement event pid filtering")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.h | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1360,14 +1360,26 @@ __event_trigger_test_discard(struct trac
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ *tt = event_triggers_call(file, buffer, entry, event);
+
+- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+- !filter_match_preds(file->filter, entry))) {
+- __trace_event_discard_commit(buffer, event);
+- return true;
+- }
++ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
++ EVENT_FILE_FL_FILTERED |
++ EVENT_FILE_FL_PID_FILTER))))
++ return false;
++
++ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
++ goto discard;
++
++ if (file->flags & EVENT_FILE_FL_FILTERED &&
++ !filter_match_preds(file->filter, entry))
++ goto discard;
++
++ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
++ trace_event_ignore_this_pid(file))
++ goto discard;
+
+ return false;
++ discard:
++ __trace_event_discard_commit(buffer, event);
++ return true;
+ }
+
+ /**
--- /dev/null
+From 1880ed71ce863318c1ce93bf324876fb5f92854f Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Tue, 23 Nov 2021 15:28:01 +0100
+Subject: tracing/uprobe: Fix uprobe_perf_open probes iteration
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 1880ed71ce863318c1ce93bf324876fb5f92854f upstream.
+
+Add missing 'tu' variable initialization in the probes loop,
+otherwise the head 'tu' is used instead of the added probes.
+
+Link: https://lkml.kernel.org/r/20211123142801.182530-1-jolsa@kernel.org
+
+Cc: stable@vger.kernel.org
+Fixes: 99c9a923e97a ("tracing/uprobe: Fix double perf_event linking on multiprobe uprobe")
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_uprobe.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace
+ return 0;
+
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
++ tu = container_of(pos, struct trace_uprobe, tp);
+ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+ if (err) {
+ uprobe_perf_close(call, event);
--- /dev/null
+From 36e8f60f0867d3b70d398d653c17108459a04efe Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Date: Tue, 23 Nov 2021 13:07:48 -0800
+Subject: xen: detect uninitialized xenbus in xenbus_init
+
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+
+commit 36e8f60f0867d3b70d398d653c17108459a04efe upstream.
+
+If the xenstore page hasn't been allocated properly, reading the value
+of the related hvm_param (HVM_PARAM_STORE_PFN) won't actually return
+an error. Instead, it will succeed and return zero. Instead of attempting
+to xen_remap a bad guest physical address, detect this condition and
+return early.
+
+Note that although a guest physical address of zero for
+HVM_PARAM_STORE_PFN is theoretically possible, it is not a good choice
+and zero has never been validly used in that capacity.
+
+Also recognize all bits set as an invalid value.
+
+For 32-bit Linux, any pfn above ULONG_MAX would get truncated. Pfns
+above ULONG_MAX should never be passed by the Xen tools to HVM guests
+anyway, so check for this condition and return early.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Link: https://lore.kernel.org/r/20211123210748.1910236-1-sstabellini@kernel.org
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/xenbus/xenbus_probe.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -949,6 +949,29 @@ static int __init xenbus_init(void)
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err)
+ goto out_error;
++ /*
++ * Uninitialized hvm_params are zero and return no error.
++ * Although it is theoretically possible to have
++ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
++ * not zero when valid. If zero, it means that Xenstore hasn't
++ * been properly initialized. Instead of attempting to map a
++ * wrong guest physical address return error.
++ *
++ * Also recognize all bits set as an invalid value.
++ */
++ if (!v || !~v) {
++ err = -ENOENT;
++ goto out_error;
++ }
++ /* Avoid truncation on 32-bit. */
++#if BITS_PER_LONG == 32
++ if (v > ULONG_MAX) {
++ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
++ __func__, v);
++ err = -EINVAL;
++ goto out_error;
++ }
++#endif
+ xen_store_gfn = (unsigned long)v;
+ xen_store_interface =
+ xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
--- /dev/null
+From 08f6c2b09ebd4b326dbe96d13f94fee8f9814c78 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Date: Mon, 15 Nov 2021 14:27:19 -0800
+Subject: xen: don't continue xenstore initialization in case of errors
+
+From: Stefano Stabellini <stefano.stabellini@xilinx.com>
+
+commit 08f6c2b09ebd4b326dbe96d13f94fee8f9814c78 upstream.
+
+In case of errors in xenbus_init (e.g. missing xen_store_gfn parameter),
+we goto out_error but we forget to reset xen_store_domain_type to
+XS_UNKNOWN. As a consequence xenbus_probe_initcall and other initcalls
+will still try to initialize xenstore resulting into a crash at boot.
+
+[ 2.479830] Call trace:
+[ 2.482314] xb_init_comms+0x18/0x150
+[ 2.486354] xs_init+0x34/0x138
+[ 2.489786] xenbus_probe+0x4c/0x70
+[ 2.498432] xenbus_probe_initcall+0x2c/0x7c
+[ 2.503944] do_one_initcall+0x54/0x1b8
+[ 2.507358] kernel_init_freeable+0x1ac/0x210
+[ 2.511617] kernel_init+0x28/0x130
+[ 2.516112] ret_from_fork+0x10/0x20
+
+Cc: <Stable@vger.kernel.org>
+Cc: jbeulich@suse.com
+Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Link: https://lore.kernel.org/r/20211115222719.2558207-1-sstabellini@kernel.org
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/xenbus/xenbus_probe.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -909,7 +909,7 @@ static struct notifier_block xenbus_resu
+
+ static int __init xenbus_init(void)
+ {
+- int err = 0;
++ int err;
+ uint64_t v = 0;
+ xen_store_domain_type = XS_UNKNOWN;
+
+@@ -983,8 +983,10 @@ static int __init xenbus_init(void)
+ */
+ proc_create_mount_point("xen");
+ #endif
++ return 0;
+
+ out_error:
++ xen_store_domain_type = XS_UNKNOWN;
+ return err;
+ }
+