git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.9-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 14 Dec 2020 14:44:47 +0000 (15:44 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 14 Dec 2020 14:44:47 +0000 (15:44 +0100)
added patches:
drm-amdgpu-disply-set-num_crtc-earlier.patch
drm-amdgpu-fix-sdma-instance-fw-version-and-feature-version-init.patch
drm-i915-display-dp-compute-the-correct-slice-count-for-vdsc-on-dp.patch
drm-i915-gem-propagate-error-from-cancelled-submit-due-to-context-closure.patch
drm-i915-gt-cancel-the-preemption-timeout-on-responding-to-it.patch
drm-i915-gt-declare-gen9-has-64-mocs-entries.patch
drm-i915-gt-ignore-repeated-attempts-to-suspend-request-flow-across-reset.patch
input-cm109-do-not-stomp-on-control-urb.patch
input-i8042-add-acer-laptops-to-the-i8042-reset-list.patch
kbuild-avoid-static_assert-for-genksyms.patch
ktest.pl-fix-incorrect-reboot-for-grub2bls.patch
media-pulse8-cec-add-support-for-fw-v10-and-up.patch
media-pulse8-cec-fix-duplicate-free-at-disconnect-or-probe-error.patch
mm-hugetlb-clear-compound_nr-before-freeing-gigantic-pages.patch
mmc-block-fixup-condition-for-cmd13-polling-for-rpmb-requests.patch
mmc-mediatek-extend-recheck_sdio_irq-fix-to-more-variants.patch
mmc-mediatek-fix-system-suspend-resume-support-for-cqhci.patch
mmc-sdhci-of-arasan-fix-clock-registration-error-for-keem-bay-soc.patch
pinctrl-amd-remove-debounce-filter-setting-in-irq-type-setting.patch
pinctrl-jasperlake-fix-hostsw_own-offset.patch
proc-use-untagged_addr-for-pagemap_read-addresses.patch
scsi-be2iscsi-revert-fix-a-theoretical-leak-in-beiscsi_create_eqs.patch
x86-apic-vector-fix-ordering-in-vector-assignment.patch
x86-kprobes-fix-optprobe-to-detect-int3-padding-correctly.patch
x86-membarrier-get-rid-of-a-dubious-optimization.patch
x86-mm-mem_encrypt-fix-definition-of-pmd_flags_dec_wp.patch
xen-add-helpers-for-caching-grant-mapping-pages.patch
xen-don-t-use-page-lru-for-zone_device-memory.patch
zonefs-fix-page-reference-and-bio-leak.patch

30 files changed:
queue-5.9/drm-amdgpu-disply-set-num_crtc-earlier.patch [new file with mode: 0644]
queue-5.9/drm-amdgpu-fix-sdma-instance-fw-version-and-feature-version-init.patch [new file with mode: 0644]
queue-5.9/drm-i915-display-dp-compute-the-correct-slice-count-for-vdsc-on-dp.patch [new file with mode: 0644]
queue-5.9/drm-i915-gem-propagate-error-from-cancelled-submit-due-to-context-closure.patch [new file with mode: 0644]
queue-5.9/drm-i915-gt-cancel-the-preemption-timeout-on-responding-to-it.patch [new file with mode: 0644]
queue-5.9/drm-i915-gt-declare-gen9-has-64-mocs-entries.patch [new file with mode: 0644]
queue-5.9/drm-i915-gt-ignore-repeated-attempts-to-suspend-request-flow-across-reset.patch [new file with mode: 0644]
queue-5.9/input-cm109-do-not-stomp-on-control-urb.patch [new file with mode: 0644]
queue-5.9/input-i8042-add-acer-laptops-to-the-i8042-reset-list.patch [new file with mode: 0644]
queue-5.9/kbuild-avoid-static_assert-for-genksyms.patch [new file with mode: 0644]
queue-5.9/ktest.pl-fix-incorrect-reboot-for-grub2bls.patch [new file with mode: 0644]
queue-5.9/media-pulse8-cec-add-support-for-fw-v10-and-up.patch [new file with mode: 0644]
queue-5.9/media-pulse8-cec-fix-duplicate-free-at-disconnect-or-probe-error.patch [new file with mode: 0644]
queue-5.9/mm-hugetlb-clear-compound_nr-before-freeing-gigantic-pages.patch [new file with mode: 0644]
queue-5.9/mmc-block-fixup-condition-for-cmd13-polling-for-rpmb-requests.patch [new file with mode: 0644]
queue-5.9/mmc-mediatek-extend-recheck_sdio_irq-fix-to-more-variants.patch [new file with mode: 0644]
queue-5.9/mmc-mediatek-fix-system-suspend-resume-support-for-cqhci.patch [new file with mode: 0644]
queue-5.9/mmc-sdhci-of-arasan-fix-clock-registration-error-for-keem-bay-soc.patch [new file with mode: 0644]
queue-5.9/pinctrl-amd-remove-debounce-filter-setting-in-irq-type-setting.patch [new file with mode: 0644]
queue-5.9/pinctrl-jasperlake-fix-hostsw_own-offset.patch [new file with mode: 0644]
queue-5.9/proc-use-untagged_addr-for-pagemap_read-addresses.patch [new file with mode: 0644]
queue-5.9/scsi-be2iscsi-revert-fix-a-theoretical-leak-in-beiscsi_create_eqs.patch [new file with mode: 0644]
queue-5.9/series
queue-5.9/x86-apic-vector-fix-ordering-in-vector-assignment.patch [new file with mode: 0644]
queue-5.9/x86-kprobes-fix-optprobe-to-detect-int3-padding-correctly.patch [new file with mode: 0644]
queue-5.9/x86-membarrier-get-rid-of-a-dubious-optimization.patch [new file with mode: 0644]
queue-5.9/x86-mm-mem_encrypt-fix-definition-of-pmd_flags_dec_wp.patch [new file with mode: 0644]
queue-5.9/xen-add-helpers-for-caching-grant-mapping-pages.patch [new file with mode: 0644]
queue-5.9/xen-don-t-use-page-lru-for-zone_device-memory.patch [new file with mode: 0644]
queue-5.9/zonefs-fix-page-reference-and-bio-leak.patch [new file with mode: 0644]

diff --git a/queue-5.9/drm-amdgpu-disply-set-num_crtc-earlier.patch b/queue-5.9/drm-amdgpu-disply-set-num_crtc-earlier.patch
new file mode 100644 (file)
index 0000000..b327df1
--- /dev/null
@@ -0,0 +1,58 @@
+From 578b6c487899179fed730e710ffec0b069917971 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 3 Dec 2020 16:06:26 -0500
+Subject: drm/amdgpu/disply: set num_crtc earlier
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 578b6c487899179fed730e710ffec0b069917971 upstream.
+
+To avoid a recently added warning:
+ Bogus possible_crtcs: [ENCODER:65:TMDS-65] possible_crtcs=0xf (full crtc mask=0x7)
+ WARNING: CPU: 3 PID: 439 at drivers/gpu/drm/drm_mode_config.c:617 drm_mode_config_validate+0x178/0x200 [drm]
+In this case the warning is harmless, but confusing to users.
+
+Fixes: 0df108237433 ("drm: Validate encoder->possible_crtcs")
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=209123
+Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -977,9 +977,6 @@ static int amdgpu_dm_init(struct amdgpu_
+               goto error;
+       }
+-      /* Update the actual used number of crtc */
+-      adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+-
+       /* create fake encoders for MST */
+       dm_dp_create_fake_mst_encoders(adev);
+@@ -3099,6 +3096,10 @@ static int amdgpu_dm_initialize_drm_devi
+       enum dc_connection_type new_connection_type = dc_connection_none;
+       const struct dc_plane_cap *plane;
++      dm->display_indexes_num = dm->dc->caps.max_streams;
++      /* Update the actual used number of crtc */
++      adev->mode_info.num_crtc = adev->dm.display_indexes_num;
++
+       link_cnt = dm->dc->caps.max_links;
+       if (amdgpu_dm_mode_config_init(dm->adev)) {
+               DRM_ERROR("DM: Failed to initialize mode config\n");
+@@ -3160,8 +3161,6 @@ static int amdgpu_dm_initialize_drm_devi
+                       goto fail;
+               }
+-      dm->display_indexes_num = dm->dc->caps.max_streams;
+-
+       /* loops over all connectors on the board */
+       for (i = 0; i < link_cnt; i++) {
+               struct dc_link *link = NULL;
diff --git a/queue-5.9/drm-amdgpu-fix-sdma-instance-fw-version-and-feature-version-init.patch b/queue-5.9/drm-amdgpu-fix-sdma-instance-fw-version-and-feature-version-init.patch
new file mode 100644 (file)
index 0000000..1a16871
--- /dev/null
@@ -0,0 +1,34 @@
+From 6896887b8676d8fb445c85ea56333b9661a6a8aa Mon Sep 17 00:00:00 2001
+From: "Stanley.Yang" <Stanley.Yang@amd.com>
+Date: Mon, 7 Dec 2020 14:38:33 +0800
+Subject: drm/amdgpu: fix sdma instance fw version and feature version init
+
+From: Stanley.Yang <Stanley.Yang@amd.com>
+
+commit 6896887b8676d8fb445c85ea56333b9661a6a8aa upstream.
+
+Each SDMA instance's fw_version and feature_version
+should be set to the right value when the ASIC type isn't
+between SIENNA_CICHLID and CHIP_DIMGREY_CAVEFISH.
+
+Signed-off-by: Stanley.Yang <Stanley.Yang@amd.com>
+Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -183,7 +183,7 @@ static int sdma_v5_2_init_microcode(stru
+                       if (err)
+                               goto out;
+-                      err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
++                      err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
+                       if (err)
+                               goto out;
+               }
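
The one-character fix above corrects a classic loop-indexing bug: every pass
of the per-instance loop wrote the firmware context into instance[0] instead
of instance[i]. The standalone sketch below is illustrative only (the
toy_sdma_instance struct and version numbers are invented; this is not driver
code); it shows why only the first instance ends up initialized before the fix.

  /* toy_sdma.c - build with: gcc -Wall toy_sdma.c */
  #include <stdio.h>

  #define NUM_INSTANCES 4

  struct toy_sdma_instance {
          unsigned int fw_version;        /* 0 means "never initialized" */
  };

  int main(void)
  {
          struct toy_sdma_instance inst[NUM_INSTANCES] = { { 0 } };
          unsigned int i;

          for (i = 0; i < NUM_INSTANCES; i++)
                  inst[0].fw_version = 100 + i;   /* bug: should index inst[i] */

          for (i = 0; i < NUM_INSTANCES; i++)
                  printf("instance %u fw_version = %u\n", i, inst[i].fw_version);

          /* Only instance 0 is set (and to the last loop value); instances
           * 1..3 stay 0. Indexing with inst[i] gives each its own version. */
          return 0;
  }
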
diff --git a/queue-5.9/drm-i915-display-dp-compute-the-correct-slice-count-for-vdsc-on-dp.patch b/queue-5.9/drm-i915-display-dp-compute-the-correct-slice-count-for-vdsc-on-dp.patch
new file mode 100644 (file)
index 0000000..b972363
--- /dev/null
@@ -0,0 +1,46 @@
+From f6cbe49be65ed800863ac5ba695555057363f9c2 Mon Sep 17 00:00:00 2001
+From: Manasi Navare <manasi.d.navare@intel.com>
+Date: Fri, 4 Dec 2020 12:58:04 -0800
+Subject: drm/i915/display/dp: Compute the correct slice count for VDSC on DP
+
+From: Manasi Navare <manasi.d.navare@intel.com>
+
+commit f6cbe49be65ed800863ac5ba695555057363f9c2 upstream.
+
+This patch fixes the slice count computation algorithm for
+calculating the slice count based on the peak pixel rate and
+the max slice width allowed on the DSC engines.
+We need to ensure the slice count is at least the minimum
+required by the DP spec based on the peak pixel rate, and also
+at least the minimum implied by the max slice width advertised
+in the DPCD. So use the max of these two.
+Previously we were using the min of the two, which violated
+the max slice width limitation and caused a blank screen on
+8K@60.
+
+Fixes: d9218c8f6cf4 ("drm/i915/dp: Add helpers for Compressed BPP and Slice Count for DSC")
+Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201204205804.25225-1-manasi.d.navare@intel.com
+(cherry picked from commit d371d6ea92ad2a47f42bbcaa786ee5f6069c9c14)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_dp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -597,7 +597,7 @@ static u8 intel_dp_dsc_get_slice_count(s
+               return 0;
+       }
+       /* Also take into account max slice width */
+-      min_slice_count = min_t(u8, min_slice_count,
++      min_slice_count = max_t(u8, min_slice_count,
+                               DIV_ROUND_UP(mode_hdisplay,
+                                            max_slice_width));
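
To see why max_t() is correct here, note that both quantities are lower
bounds on the slice count: one comes from the peak pixel rate per the DP
spec, the other from the maximum slice width the sink advertises. Only a
value at least as large as the bigger of the two satisfies both. A minimal
userspace sketch (the pixel-rate minimum and the slice-width limit below are
invented numbers, not values from the DP spec or any real DPCD):

  /* slices.c - build with: gcc -Wall slices.c */
  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
  #define MIN(a, b)           ((a) < (b) ? (a) : (b))
  #define MAX(a, b)           ((a) > (b) ? (a) : (b))

  int main(void)
  {
          unsigned int min_slices_for_rate = 2;     /* hypothetical DP-spec minimum */
          unsigned int mode_hdisplay       = 7680;  /* 8K-wide mode */
          unsigned int max_slice_width     = 2048;  /* hypothetical sink limit */
          unsigned int min_slices_for_width =
                  DIV_ROUND_UP(mode_hdisplay, max_slice_width);  /* = 4 */

          unsigned int pick_min = MIN(min_slices_for_rate, min_slices_for_width);
          unsigned int pick_max = MAX(min_slices_for_rate, min_slices_for_width);

          printf("min(): %u slices -> slice width %u, exceeds limit %u\n",
                 pick_min, mode_hdisplay / pick_min, max_slice_width);
          printf("max(): %u slices -> slice width %u, within limit %u\n",
                 pick_max, mode_hdisplay / pick_max, max_slice_width);
          return 0;
  }
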
diff --git a/queue-5.9/drm-i915-gem-propagate-error-from-cancelled-submit-due-to-context-closure.patch b/queue-5.9/drm-i915-gem-propagate-error-from-cancelled-submit-due-to-context-closure.patch
new file mode 100644 (file)
index 0000000..939b926
--- /dev/null
@@ -0,0 +1,71 @@
+From 0e124e19ce52d20b28ee9f1d5cdb22e2106bfd29 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 3 Dec 2020 10:34:32 +0000
+Subject: drm/i915/gem: Propagate error from cancelled submit due to context closure
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 0e124e19ce52d20b28ee9f1d5cdb22e2106bfd29 upstream.
+
+In the course of discovering and closing many races with context closure
+and execbuf submission, since commit 61231f6bd056 ("drm/i915/gem: Check
+that the context wasn't closed during setup") we started checking that
+the context was not closed by another userspace thread during the execbuf
+ioctl. In doing so we cancelled the inflight request (by telling it to be
+skipped), but kept reporting success since we do submit a request, albeit
+one that doesn't execute. As the error is known before we return from the
+ioctl, we can report the error we detect immediately, rather than leave
+it on the fence status. With the immediate propagation of the error, it
+is easier for userspace to handle.
+
+Fixes: 61231f6bd056 ("drm/i915/gem: Check that the context wasn't closed during setup")
+Testcase: igt/gem_ctx_exec/basic-close-race
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.7+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201203103432.31526-1-chris@chris-wilson.co.uk
+(cherry picked from commit ba38b79eaeaeed29d2383f122d5c711ebf5ed3d1)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2613,7 +2613,7 @@ static void retire_requests(struct intel
+                       break;
+ }
+-static void eb_request_add(struct i915_execbuffer *eb)
++static int eb_request_add(struct i915_execbuffer *eb, int err)
+ {
+       struct i915_request *rq = eb->request;
+       struct intel_timeline * const tl = i915_request_timeline(rq);
+@@ -2634,6 +2634,7 @@ static void eb_request_add(struct i915_e
+               /* Serialise with context_close via the add_to_timeline */
+               i915_request_set_error_once(rq, -ENOENT);
+               __i915_request_skip(rq);
++              err = -ENOENT; /* override any transient errors */
+       }
+       __i915_request_queue(rq, &attr);
+@@ -2643,6 +2644,8 @@ static void eb_request_add(struct i915_e
+               retire_requests(tl, prev);
+       mutex_unlock(&tl->mutex);
++
++      return err;
+ }
+ static int
+@@ -2844,7 +2847,7 @@ i915_gem_do_execbuffer(struct drm_device
+ err_request:
+       add_to_client(eb.request, file);
+       i915_request_get(eb.request);
+-      eb_request_add(&eb);
++      err = eb_request_add(&eb, err);
+       if (fences)
+               signal_fence_array(&eb, fences);
diff --git a/queue-5.9/drm-i915-gt-cancel-the-preemption-timeout-on-responding-to-it.patch b/queue-5.9/drm-i915-gt-cancel-the-preemption-timeout-on-responding-to-it.patch
new file mode 100644 (file)
index 0000000..bb64cce
--- /dev/null
@@ -0,0 +1,44 @@
+From 0fe8bf4d3edce7aad6c14b9d5d92ff54dc19f0ba Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 4 Dec 2020 15:12:32 +0000
+Subject: drm/i915/gt: Cancel the preemption timeout on responding to it
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 0fe8bf4d3edce7aad6c14b9d5d92ff54dc19f0ba upstream.
+
+We currently presume that the engine reset is successful, cancelling the
+expired preemption timer in the process. However, engine resets can
+fail, leaving the timeout still pending and we will then respond to the
+timeout again next time the tasklet fires. What we want is for the
+failed engine reset to be promoted to a full device reset, which is
+kicked by the heartbeat once the engine stops processing events.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1168
+Fixes: 3a7a92aba8fb ("drm/i915/execlists: Force preemption")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.5+
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201204151234.19729-2-chris@chris-wilson.co.uk
+(cherry picked from commit d997e240ceecb4f732611985d3a939ad1bfc1893)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_lrc.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -3172,8 +3172,10 @@ static void execlists_submission_tasklet
+               spin_unlock_irqrestore(&engine->active.lock, flags);
+               /* Recheck after serialising with direct-submission */
+-              if (unlikely(timeout && preempt_timeout(engine)))
++              if (unlikely(timeout && preempt_timeout(engine))) {
++                      cancel_timer(&engine->execlists.preempt);
+                       execlists_reset(engine, "preemption time out");
++              }
+       }
+ }
diff --git a/queue-5.9/drm-i915-gt-declare-gen9-has-64-mocs-entries.patch b/queue-5.9/drm-i915-gt-declare-gen9-has-64-mocs-entries.patch
new file mode 100644 (file)
index 0000000..8e03a58
--- /dev/null
@@ -0,0 +1,52 @@
+From 7c5c15dffe1e3c42f44735ce9552afb7207f1584 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 27 Nov 2020 10:25:40 +0000
+Subject: drm/i915/gt: Declare gen9 has 64 mocs entries!
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 7c5c15dffe1e3c42f44735ce9552afb7207f1584 upstream.
+
+We checked the table size against a hardcoded number of entries, and
+that number excluded the special mocs registers at the end.
+
+Fixes: 777a7717d60c ("drm/i915/gt: Program mocs:63 for cache eviction on gen9")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v4.3+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201127102540.13117-1-chris@chris-wilson.co.uk
+(cherry picked from commit 444fbf5d7058099447c5366ba8bb60d610aeb44b)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+[backported and updated the Fixes sha]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_mocs.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
+ #define _L3_CACHEABILITY(value)       ((value) << 4)
+ /* Helper defines */
+-#define GEN9_NUM_MOCS_ENTRIES 62  /* 62 out of 64 - 63 & 64 are reserved. */
+-#define GEN11_NUM_MOCS_ENTRIES        64  /* 63-64 are reserved, but configured. */
++#define GEN9_NUM_MOCS_ENTRIES 64  /* 63-64 are reserved, but configured. */
+ /* (e)LLC caching options */
+ /*
+@@ -328,11 +327,11 @@ static unsigned int get_mocs_settings(co
+       if (INTEL_GEN(i915) >= 12) {
+               table->size  = ARRAY_SIZE(tgl_mocs_table);
+               table->table = tgl_mocs_table;
+-              table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++              table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+       } else if (IS_GEN(i915, 11)) {
+               table->size  = ARRAY_SIZE(icl_mocs_table);
+               table->table = icl_mocs_table;
+-              table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++              table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+       } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
+               table->size  = ARRAY_SIZE(skl_mocs_table);
+               table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/queue-5.9/drm-i915-gt-ignore-repeated-attempts-to-suspend-request-flow-across-reset.patch b/queue-5.9/drm-i915-gt-ignore-repeated-attempts-to-suspend-request-flow-across-reset.patch
new file mode 100644 (file)
index 0000000..3db6e1e
--- /dev/null
@@ -0,0 +1,42 @@
+From 5419d93ffd774127b195b8543b063b2b4fa5aea9 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 4 Dec 2020 15:12:31 +0000
+Subject: drm/i915/gt: Ignore repeated attempts to suspend request flow across reset
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 5419d93ffd774127b195b8543b063b2b4fa5aea9 upstream.
+
+Before resetting the engine, we suspend the execution of the guilty
+request, so that we can continue execution with a new context while we
+slowly compress the captured error state for the guilty context. However,
+if the reset fails, we will promptly attempt to reset the same request
+again, and discover the ongoing capture. Ignore the second attempt to
+suspend and capture the same request.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1168
+Fixes: 32ff621fd744 ("drm/i915/gt: Allow temporary suspension of inflight requests")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.7+
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201204151234.19729-1-chris@chris-wilson.co.uk
+(cherry picked from commit b969540500bce60cf1cdfff5464388af32b9a553)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_lrc.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915
+ static bool execlists_hold(struct intel_engine_cs *engine,
+                          struct i915_request *rq)
+ {
++      if (i915_request_on_hold(rq))
++              return false;
++
+       spin_lock_irq(&engine->active.lock);
+       if (i915_request_completed(rq)) { /* too late! */
diff --git a/queue-5.9/input-cm109-do-not-stomp-on-control-urb.patch b/queue-5.9/input-cm109-do-not-stomp-on-control-urb.patch
new file mode 100644 (file)
index 0000000..5ac5ef7
--- /dev/null
@@ -0,0 +1,42 @@
+From 82e06090473289ce63e23fdeb8737aad59b10645 Mon Sep 17 00:00:00 2001
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Wed, 9 Dec 2020 20:13:24 -0800
+Subject: Input: cm109 - do not stomp on control URB
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+commit 82e06090473289ce63e23fdeb8737aad59b10645 upstream.
+
+We need to make sure that, when attempting to toggle the buzzer, we are
+not stomping on the control URB that was issued when opening the device.
+To do that we need to mark it as pending in cm109_open().
+
+Reported-and-tested-by: syzbot+150f793ac5bc18eee150@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/misc/cm109.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/input/misc/cm109.c
++++ b/drivers/input/misc/cm109.c
+@@ -568,12 +568,15 @@ static int cm109_input_open(struct input
+       dev->ctl_data->byte[HID_OR2] = dev->keybit;
+       dev->ctl_data->byte[HID_OR3] = 0x00;
++      dev->ctl_urb_pending = 1;
+       error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
+-      if (error)
++      if (error) {
++              dev->ctl_urb_pending = 0;
+               dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
+                       __func__, error);
+-      else
++      } else {
+               dev->open = 1;
++      }
+       mutex_unlock(&dev->pm_mutex);
diff --git a/queue-5.9/input-i8042-add-acer-laptops-to-the-i8042-reset-list.patch b/queue-5.9/input-i8042-add-acer-laptops-to-the-i8042-reset-list.patch
new file mode 100644 (file)
index 0000000..c38d34f
--- /dev/null
@@ -0,0 +1,74 @@
+From ce6520b0eafad5962ffc21dc47cd7bd3250e9045 Mon Sep 17 00:00:00 2001
+From: Chris Chiu <chiu@endlessos.org>
+Date: Wed, 9 Dec 2020 20:24:47 -0800
+Subject: Input: i8042 - add Acer laptops to the i8042 reset list
+
+From: Chris Chiu <chiu@endlessos.org>
+
+commit ce6520b0eafad5962ffc21dc47cd7bd3250e9045 upstream.
+
+The touchpad operates in Basic Mode by default in the Acer BIOS
+setup, but some Aspire/TravelMate models require the i8042 to be
+reset in order to be correctly detected.
+
+Signed-off-by: Chris Chiu <chiu@endlessos.org>
+Link: https://lore.kernel.org/r/20201207071250.15021-1-chiu@endlessos.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h |   42 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -612,6 +612,48 @@ static const struct dmi_system_id __init
+               },
+       },
+       {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
++              },
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
++              },
++      },
++      {
+               /* Advent 4211 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
diff --git a/queue-5.9/kbuild-avoid-static_assert-for-genksyms.patch b/queue-5.9/kbuild-avoid-static_assert-for-genksyms.patch
new file mode 100644 (file)
index 0000000..032aa9b
--- /dev/null
@@ -0,0 +1,48 @@
+From 14dc3983b5dff513a90bd5a8cc90acaf7867c3d0 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 11 Dec 2020 13:36:38 -0800
+Subject: kbuild: avoid static_assert for genksyms
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 14dc3983b5dff513a90bd5a8cc90acaf7867c3d0 upstream.
+
+genksyms does not know or care about the _Static_assert() built-in, and
+sometimes falls back to ignoring the later symbols, which causes
+undefined behavior such as
+
+  WARNING: modpost: EXPORT symbol "ethtool_set_ethtool_phy_ops" [vmlinux] version generation failed, symbol will not be versioned.
+  ld: net/ethtool/common.o: relocation R_AARCH64_ABS32 against `__crc_ethtool_set_ethtool_phy_ops' can not be used when making a shared object
+  net/ethtool/common.o:(_ftrace_annotated_branch+0x0): dangerous relocation: unsupported relocation
+
+Redefine static_assert for genksyms to avoid that.
+
+Link: https://lkml.kernel.org/r/20201203230955.1482058-1-arnd@kernel.org
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Suggested-by: Ard Biesheuvel <ardb@kernel.org>
+Cc: Masahiro Yamada <masahiroy@kernel.org>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Rikard Falkeborn <rikard.falkeborn@gmail.com>
+Cc: Marco Elver <elver@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/build_bug.h |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/build_bug.h
++++ b/include/linux/build_bug.h
+@@ -77,4 +77,9 @@
+ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+ #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
++#ifdef __GENKSYMS__
++/* genksyms gets confused by _Static_assert */
++#define _Static_assert(expr, ...)
++#endif
++
+ #endif        /* _LINUX_BUILD_BUG_H */
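
The effect of the fix can be reproduced outside the kernel: kbuild runs the
preprocessor with __GENKSYMS__ defined when it feeds sources to genksyms, so
the assertion simply expands to nothing for that pass while a normal compile
still checks it. A minimal sketch, with the macro chain copied in shape from
include/linux/build_bug.h (the assertion itself is an arbitrary example):

  /* assert_demo.c - try: gcc -Wall assert_demo.c
   *                 and: gcc -Wall -D__GENKSYMS__ assert_demo.c */
  #include <stdio.h>

  #ifdef __GENKSYMS__
  /* a genksyms-like parser does not understand _Static_assert: drop it */
  #define _Static_assert(expr, ...)
  #endif

  #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
  #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)

  int main(void)
  {
          /* expands to _Static_assert(...) normally, to nothing for genksyms */
          static_assert(sizeof(long) >= sizeof(int), "unexpected type sizes");
          puts("compiled fine either way");
          return 0;
  }
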
diff --git a/queue-5.9/ktest.pl-fix-incorrect-reboot-for-grub2bls.patch b/queue-5.9/ktest.pl-fix-incorrect-reboot-for-grub2bls.patch
new file mode 100644 (file)
index 0000000..2615912
--- /dev/null
@@ -0,0 +1,46 @@
+From 271e0c9dce1b02a825b3cc1a7aa1fab7c381d44b Mon Sep 17 00:00:00 2001
+From: Libo Chen <libo.chen@oracle.com>
+Date: Fri, 20 Nov 2020 18:12:43 -0800
+Subject: ktest.pl: Fix incorrect reboot for grub2bls
+
+From: Libo Chen <libo.chen@oracle.com>
+
+commit 271e0c9dce1b02a825b3cc1a7aa1fab7c381d44b upstream.
+
+This issue was first noticed when I was testing different kernels on
+Oracle Linux 8, which, like Fedora 30+, adopts BLS by default. Even though
+a kernel entry was added successfully and the index of that kernel entry
+was retrieved correctly, ktest still wouldn't reboot the system into the
+user-specified kernel.
+
+The bug was spotted in subroutine reboot_to, where the if-statement never
+checks for REBOOT_TYPE "grub2bls", so the desired entry will not be set
+for the next boot.
+
+Add a check for "grub2bls" so that "$grub_reboot $grub_number" is run
+before a reboot when REBOOT_TYPE is "grub2bls"; then we can boot into
+the correct kernel.
+
+Link: https://lkml.kernel.org/r/20201121021243.1532477-1-libo.chen@oracle.com
+
+Cc: stable@vger.kernel.org
+Fixes: ac2466456eaa ("ktest: introduce grub2bls REBOOT_TYPE option")
+Signed-off-by: Libo Chen <libo.chen@oracle.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/ktest/ktest.pl |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2040,7 +2040,7 @@ sub reboot_to {
+     if ($reboot_type eq "grub") {
+       run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+-    } elsif ($reboot_type eq "grub2") {
++    } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
+       run_ssh "$grub_reboot $grub_number";
+     } elsif ($reboot_type eq "syslinux") {
+       run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
diff --git a/queue-5.9/media-pulse8-cec-add-support-for-fw-v10-and-up.patch b/queue-5.9/media-pulse8-cec-add-support-for-fw-v10-and-up.patch
new file mode 100644 (file)
index 0000000..138849f
--- /dev/null
@@ -0,0 +1,114 @@
+From 45ba1c0ba3e589ad3ef0d0603c822eb27ea16563 Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Fri, 27 Nov 2020 12:52:30 +0100
+Subject: media: pulse8-cec: add support for FW v10 and up
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+commit 45ba1c0ba3e589ad3ef0d0603c822eb27ea16563 upstream.
+
+Starting with firmware version 10 the GET/SET_HDMI_VERSION message
+was removed and GET/SET_AUTO_POWER_ON was added.
+
+The removal of GET/SET_HDMI_VERSION caused the probe of the
+Pulse-Eight to fail. Add a version check to handle this gracefully.
+
+Also show (but do not set) the Auto Power On value.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Reported-by: Maxime Ripard <mripard@kernel.org>
+Tested-by: Maxime Ripard <mripard@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/cec/usb/pulse8/pulse8-cec.c |   43 ++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 13 deletions(-)
+
+--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
++++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+@@ -88,13 +88,15 @@ enum pulse8_msgcodes {
+       MSGCODE_SET_PHYSICAL_ADDRESS,   /* 0x20 */
+       MSGCODE_GET_DEVICE_TYPE,
+       MSGCODE_SET_DEVICE_TYPE,
+-      MSGCODE_GET_HDMI_VERSION,
++      MSGCODE_GET_HDMI_VERSION,       /* Removed in FW >= 10 */
+       MSGCODE_SET_HDMI_VERSION,
+       MSGCODE_GET_OSD_NAME,
+       MSGCODE_SET_OSD_NAME,
+       MSGCODE_WRITE_EEPROM,
+       MSGCODE_GET_ADAPTER_TYPE,       /* 0x28 */
+       MSGCODE_SET_ACTIVE_SOURCE,
++      MSGCODE_GET_AUTO_POWER_ON,      /* New for FW >= 10 */
++      MSGCODE_SET_AUTO_POWER_ON,
+       MSGCODE_FRAME_EOM = 0x80,
+       MSGCODE_FRAME_ACK = 0x40,
+@@ -143,6 +145,8 @@ static const char * const pulse8_msgname
+       "WRITE_EEPROM",
+       "GET_ADAPTER_TYPE",
+       "SET_ACTIVE_SOURCE",
++      "GET_AUTO_POWER_ON",
++      "SET_AUTO_POWER_ON",
+ };
+ static const char *pulse8_msgname(u8 cmd)
+@@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(stru
+       if (err)
+               goto unlock;
+-      cmd[0] = MSGCODE_SET_HDMI_VERSION;
+-      cmd[1] = adap->log_addrs.cec_version;
+-      err = pulse8_send_and_wait(pulse8, cmd, 2,
+-                                 MSGCODE_COMMAND_ACCEPTED, 0);
+-      if (err)
+-              goto unlock;
++      if (pulse8->vers < 10) {
++              cmd[0] = MSGCODE_SET_HDMI_VERSION;
++              cmd[1] = adap->log_addrs.cec_version;
++              err = pulse8_send_and_wait(pulse8, cmd, 2,
++                                         MSGCODE_COMMAND_ACCEPTED, 0);
++              if (err)
++                      goto unlock;
++      }
+       if (adap->log_addrs.osd_name[0]) {
+               size_t osd_len = strlen(adap->log_addrs.osd_name);
+@@ -691,6 +697,14 @@ static int pulse8_setup(struct pulse8 *p
+       dev_dbg(pulse8->dev, "Autonomous mode: %s",
+               data[0] ? "on" : "off");
++      if (pulse8->vers >= 10) {
++              cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
++              err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++              if (!err)
++                      dev_dbg(pulse8->dev, "Auto Power On: %s",
++                              data[0] ? "on" : "off");
++      }
++
+       cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+       err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+       if (err)
+@@ -752,12 +766,15 @@ static int pulse8_setup(struct pulse8 *p
+       dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+               cec_phys_addr_exp(*pa));
+-      cmd[0] = MSGCODE_GET_HDMI_VERSION;
+-      err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+-      if (err)
+-              return err;
+-      log_addrs->cec_version = data[0];
+-      dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++      log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
++      if (pulse8->vers < 10) {
++              cmd[0] = MSGCODE_GET_HDMI_VERSION;
++              err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++              if (err)
++                      return err;
++              log_addrs->cec_version = data[0];
++              dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++      }
+       cmd[0] = MSGCODE_GET_OSD_NAME;
+       err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
diff --git a/queue-5.9/media-pulse8-cec-fix-duplicate-free-at-disconnect-or-probe-error.patch b/queue-5.9/media-pulse8-cec-fix-duplicate-free-at-disconnect-or-probe-error.patch
new file mode 100644 (file)
index 0000000..95defd6
--- /dev/null
@@ -0,0 +1,79 @@
+From 024e01dead12c2b9fbe31216f2099401ebb78a4a Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Fri, 27 Nov 2020 10:36:32 +0100
+Subject: media: pulse8-cec: fix duplicate free at disconnect or probe error
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+commit 024e01dead12c2b9fbe31216f2099401ebb78a4a upstream.
+
+Commit 601282d65b96 ("media: pulse8-cec: use adap_free callback") used
+the adap_free callback to clean up on disconnect. What I forgot was that
+in the probe it will call cec_delete_adapter() followed by kfree(pulse8)
+if an error occurs. But by using the adap_free callback,
+cec_delete_adapter() is already freeing the pulse8 struct.
+
+This wasn't noticed since normally the probe works fine, but Pulse-Eight
+published a new firmware version that caused a probe error, so now it
+hits this bug. This affects firmware version 12, but probably any
+version >= 10.
+
+Commit aa9eda76129c ("media: pulse8-cec: close serio in disconnect, not
+adap_free") made this worse by adding the line 'pulse8->serio = NULL'
+right after the call to cec_unregister_adapter in the disconnect()
+function. Unfortunately, cec_unregister_adapter will typically call
+cec_delete_adapter (unless a filehandle to the cec device is still
+open), which frees the pulse8 struct. So now it will also crash on a
+simple unplug of the Pulse-Eight device.
+
+With this fix both the unplug issue and a probe() error situation are
+handled correctly again.
+
+It will still fail to probe() with a v12 firmware, that's something
+to look at separately.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Reported-by: Maxime Ripard <mripard@kernel.org>
+Tested-by: Maxime Ripard <mripard@kernel.org>
+Fixes: aa9eda76129c ("media: pulse8-cec: close serio in disconnect, not adap_free")
+Fixes: 601282d65b96 ("media: pulse8-cec: use adap_free callback")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/cec/usb/pulse8/pulse8-cec.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
++++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+@@ -650,7 +650,6 @@ static void pulse8_disconnect(struct ser
+       struct pulse8 *pulse8 = serio_get_drvdata(serio);
+       cec_unregister_adapter(pulse8->adap);
+-      pulse8->serio = NULL;
+       serio_set_drvdata(serio, NULL);
+       serio_close(serio);
+ }
+@@ -830,8 +829,10 @@ static int pulse8_connect(struct serio *
+       pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
+                                           dev_name(&serio->dev), caps, 1);
+       err = PTR_ERR_OR_ZERO(pulse8->adap);
+-      if (err < 0)
+-              goto free_device;
++      if (err < 0) {
++              kfree(pulse8);
++              return err;
++      }
+       pulse8->dev = &serio->dev;
+       serio_set_drvdata(serio, pulse8);
+@@ -874,8 +875,6 @@ close_serio:
+       serio_close(serio);
+ delete_adap:
+       cec_delete_adapter(pulse8->adap);
+-free_device:
+-      kfree(pulse8);
+       return err;
+ }
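
The underlying pattern is an ownership hand-off: once the free callback is
registered, deleting the adapter also frees the driver's private data, so the
probe error path must not free it a second time. A toy model of that pattern
(toy_adapter, toy_delete_adapter and the other names are invented for this
sketch; they are not the CEC API):

  /* ownership.c - build with: gcc -Wall ownership.c */
  #include <stdio.h>
  #include <stdlib.h>

  struct toy_priv { int dummy; };

  struct toy_adapter {
          struct toy_priv *priv;
          void (*adap_free)(struct toy_adapter *adap);  /* optional destructor */
  };

  static void toy_adap_free(struct toy_adapter *adap)
  {
          free(adap->priv);               /* the adapter owns priv from now on */
  }

  static void toy_delete_adapter(struct toy_adapter *adap)
  {
          if (adap->adap_free)
                  adap->adap_free(adap);  /* frees priv exactly once */
          free(adap);
  }

  int main(void)
  {
          struct toy_priv *priv = calloc(1, sizeof(*priv));
          struct toy_adapter *adap = calloc(1, sizeof(*adap));

          adap->priv = priv;
          adap->adap_free = toy_adap_free;  /* ownership handed off here */

          /* simulated probe error path */
          toy_delete_adapter(adap);         /* already frees priv via adap_free */
          /* free(priv); */                 /* doing this too would double free */

          puts("cleaned up exactly once");
          return 0;
  }
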
diff --git a/queue-5.9/mm-hugetlb-clear-compound_nr-before-freeing-gigantic-pages.patch b/queue-5.9/mm-hugetlb-clear-compound_nr-before-freeing-gigantic-pages.patch
new file mode 100644 (file)
index 0000000..cac055e
--- /dev/null
@@ -0,0 +1,75 @@
+From ba9c1201beaa86a773e83be5654602a0667e4a4d Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Date: Fri, 11 Dec 2020 13:36:53 -0800
+Subject: mm/hugetlb: clear compound_nr before freeing gigantic pages
+
+From: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+
+commit ba9c1201beaa86a773e83be5654602a0667e4a4d upstream.
+
+Commit 1378a5ee451a ("mm: store compound_nr as well as compound_order")
+added compound_nr counter to first tail struct page, overlaying with
+page->mapping.  The overlay itself is fine, but while freeing gigantic
+hugepages via free_contig_range(), a "bad page" check will trigger for
+non-NULL page->mapping on the first tail page:
+
+  BUG: Bad page state in process bash  pfn:380001
+  page:00000000c35f0856 refcount:0 mapcount:0 mapping:00000000126b68aa index:0x0 pfn:0x380001
+  aops:0x0
+  flags: 0x3ffff00000000000()
+  raw: 3ffff00000000000 0000000000000100 0000000000000122 0000000100000000
+  raw: 0000000000000000 0000000000000000 ffffffff00000000 0000000000000000
+  page dumped because: non-NULL mapping
+  Modules linked in:
+  CPU: 6 PID: 616 Comm: bash Not tainted 5.10.0-rc7-next-20201208 #1
+  Hardware name: IBM 3906 M03 703 (LPAR)
+  Call Trace:
+    show_stack+0x6e/0xe8
+    dump_stack+0x90/0xc8
+    bad_page+0xd6/0x130
+    free_pcppages_bulk+0x26a/0x800
+    free_unref_page+0x6e/0x90
+    free_contig_range+0x94/0xe8
+    update_and_free_page+0x1c4/0x2c8
+    free_pool_huge_page+0x11e/0x138
+    set_max_huge_pages+0x228/0x300
+    nr_hugepages_store_common+0xb8/0x130
+    kernfs_fop_write+0xd2/0x218
+    vfs_write+0xb0/0x2b8
+    ksys_write+0xac/0xe0
+    system_call+0xe6/0x288
+  Disabling lock debugging due to kernel taint
+
+This is because only the compound_order is cleared in
+destroy_compound_gigantic_page(), and compound_nr is set to
+1U << order == 1 for order 0 in set_compound_order(page, 0).
+
+Fix this by explicitly clearing compound_nr for first tail page after
+calling set_compound_order(page, 0).
+
+Link: https://lkml.kernel.org/r/20201208182813.66391-2-gerald.schaefer@linux.ibm.com
+Fixes: 1378a5ee451a ("mm: store compound_nr as well as compound_order")
+Signed-off-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: <stable@vger.kernel.org>   [5.9+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1227,6 +1227,7 @@ static void destroy_compound_gigantic_pa
+       }
+       set_compound_order(page, 0);
++      page[1].compound_nr = 0;
+       __ClearPageHead(page);
+ }
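
The arithmetic behind the one-line fix: set_compound_order(page, 0) stores
compound_nr = 1U << 0 = 1 rather than 0, and because compound_nr overlays
page->mapping on the first tail page, the page then appears to have a
non-NULL mapping when it is freed. A toy model of the overlay (this is not
the real struct page layout, just an illustration of the aliasing):

  /* overlay.c - build with: gcc -Wall overlay.c */
  #include <stdio.h>

  /* stand-in for the first tail struct page: compound_nr shares storage
   * with the mapping pointer, loosely mirroring the real overlay */
  struct toy_tail_page {
          union {
                  void *mapping;
                  unsigned int compound_nr;
          };
          unsigned char compound_order;
  };

  static void toy_set_compound_order(struct toy_tail_page *p, unsigned int order)
  {
          p->compound_order = order;
          p->compound_nr = 1U << order;   /* order 0 still yields nr == 1 */
  }

  int main(void)
  {
          struct toy_tail_page tail = { .mapping = NULL };

          toy_set_compound_order(&tail, 0);
          printf("compound_nr = %u, mapping looks %s\n", tail.compound_nr,
                 tail.mapping ? "non-NULL (bad page)" : "NULL");

          tail.compound_nr = 0;           /* the explicit clear added above */
          printf("compound_nr = %u, mapping looks %s\n", tail.compound_nr,
                 tail.mapping ? "non-NULL (bad page)" : "NULL");
          return 0;
  }
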
diff --git a/queue-5.9/mmc-block-fixup-condition-for-cmd13-polling-for-rpmb-requests.patch b/queue-5.9/mmc-block-fixup-condition-for-cmd13-polling-for-rpmb-requests.patch
new file mode 100644 (file)
index 0000000..65d0c9f
--- /dev/null
@@ -0,0 +1,40 @@
+From 6246d7c9d15aaff0bc3863f67900c6a6e6be921b Mon Sep 17 00:00:00 2001
+From: Bean Huo <beanhuo@micron.com>
+Date: Wed, 2 Dec 2020 21:23:20 +0100
+Subject: mmc: block: Fixup condition for CMD13 polling for RPMB requests
+
+From: Bean Huo <beanhuo@micron.com>
+
+commit 6246d7c9d15aaff0bc3863f67900c6a6e6be921b upstream.
+
+The CMD13 polling is needed for commands with R1B responses. In commit
+a0d4c7eb71dd ("mmc: block: Add CMD13 polling for MMC IOCTLS with R1B
+response"), the intent was to introduce this for requests targeted to the
+RPMB partition. However, the condition to trigger the polling loop became
+wrong, leading to unnecessary polling. Let's fix the condition to avoid
+this.
+
+Fixes: a0d4c7eb71dd ("mmc: block: Add CMD13 polling for MMC IOCTLS with R1B response")
+Cc: stable@vger.kernel.org
+Reported-by: Zhan Liu <zliua@micron.com>
+Signed-off-by: Zhan Liu <zliua@micron.com>
+Signed-off-by: Bean Huo <beanhuo@micron.com>
+Link: https://lore.kernel.org/r/20201202202320.22165-1-huobean@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/block.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -580,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mm
+       memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+-      if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
++      if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+               /*
+                * Ensure RPMB/R1B command has completed by polling CMD13
+                * "Send Status".
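
The subtlety is that MMC_RSP_R1B is a multi-bit mask and a plain R1 response
shares every bit of it except MMC_RSP_BUSY, so the old "cmd.flags &
MMC_RSP_R1B" test is also true for ordinary R1 commands and polls CMD13 when
it should not. Only the full-mask comparison isolates real R1B commands. A
small sketch (the flag values mirror include/linux/mmc/core.h and are shown
purely for illustration):

  /* r1b_check.c - build with: gcc -Wall r1b_check.c */
  #include <stdio.h>

  #define MMC_RSP_PRESENT (1 << 0)
  #define MMC_RSP_CRC     (1 << 2)
  #define MMC_RSP_BUSY    (1 << 3)
  #define MMC_RSP_OPCODE  (1 << 4)

  #define MMC_RSP_R1  (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
  #define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)

  static void check(const char *name, unsigned int flags)
  {
          printf("%-4s old test: %-4s  fixed test: %s\n", name,
                 (flags & MMC_RSP_R1B) ? "poll" : "skip",
                 ((flags & MMC_RSP_R1B) == MMC_RSP_R1B) ? "poll" : "skip");
  }

  int main(void)
  {
          check("R1",  MMC_RSP_R1);   /* old test polls needlessly */
          check("R1B", MMC_RSP_R1B);  /* both tests poll, as intended */
          return 0;
  }
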
diff --git a/queue-5.9/mmc-mediatek-extend-recheck_sdio_irq-fix-to-more-variants.patch b/queue-5.9/mmc-mediatek-extend-recheck_sdio_irq-fix-to-more-variants.patch
new file mode 100644 (file)
index 0000000..5c94038
--- /dev/null
@@ -0,0 +1,81 @@
+From 903a72eca4abf241293dcc1385896fd428e15fe9 Mon Sep 17 00:00:00 2001
+From: yong mao <yong.mao@mediatek.com>
+Date: Thu, 19 Nov 2020 11:02:37 +0800
+Subject: mmc: mediatek: Extend recheck_sdio_irq fix to more variants
+
+From: yong mao <yong.mao@mediatek.com>
+
+commit 903a72eca4abf241293dcc1385896fd428e15fe9 upstream.
+
+The SDIO recheck fix is required for more of the supported variants. Let's
+add it to those that need it.
+
+Reported-by: Fabien Parent <fparent@baylibre.com>
+Reported-by: Mattijs Korpershoek <mkorpershoek@baylibre.com>
+Signed-off-by: Yong Mao <yong.mao@mediatek.com>
+Link: https://lore.kernel.org/r/20201119030237.9414-1-yong.mao@mediatek.com
+Fixes: 9e2582e57407 ("mmc: mediatek: fix SDIO irq issue")
+Cc: stable@vger.kernel.org
+[Ulf: Clarified commitmsg ]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/mtk-sd.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -447,7 +447,7 @@ struct msdc_host {
+ static const struct mtk_mmc_compatible mt8135_compat = {
+       .clk_div_bits = 8,
+-      .recheck_sdio_irq = false,
++      .recheck_sdio_irq = true,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE,
+       .async_fifo = false,
+@@ -486,7 +486,7 @@ static const struct mtk_mmc_compatible m
+ static const struct mtk_mmc_compatible mt2701_compat = {
+       .clk_div_bits = 12,
+-      .recheck_sdio_irq = false,
++      .recheck_sdio_irq = true,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE0,
+       .async_fifo = true,
+@@ -512,7 +512,7 @@ static const struct mtk_mmc_compatible m
+ static const struct mtk_mmc_compatible mt7622_compat = {
+       .clk_div_bits = 12,
+-      .recheck_sdio_irq = false,
++      .recheck_sdio_irq = true,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE0,
+       .async_fifo = true,
+@@ -525,7 +525,7 @@ static const struct mtk_mmc_compatible m
+ static const struct mtk_mmc_compatible mt8516_compat = {
+       .clk_div_bits = 12,
+-      .recheck_sdio_irq = false,
++      .recheck_sdio_irq = true,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE0,
+       .async_fifo = true,
+@@ -536,7 +536,7 @@ static const struct mtk_mmc_compatible m
+ static const struct mtk_mmc_compatible mt7620_compat = {
+       .clk_div_bits = 8,
+-      .recheck_sdio_irq = false,
++      .recheck_sdio_irq = true,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE,
+       .async_fifo = false,
+@@ -549,6 +549,7 @@ static const struct mtk_mmc_compatible m
+ static const struct mtk_mmc_compatible mt6779_compat = {
+       .clk_div_bits = 12,
++      .recheck_sdio_irq = false,
+       .hs400_tune = false,
+       .pad_tune_reg = MSDC_PAD_TUNE0,
+       .async_fifo = true,
diff --git a/queue-5.9/mmc-mediatek-fix-system-suspend-resume-support-for-cqhci.patch b/queue-5.9/mmc-mediatek-fix-system-suspend-resume-support-for-cqhci.patch
new file mode 100644 (file)
index 0000000..039472a
--- /dev/null
@@ -0,0 +1,83 @@
+From c0a2074ac575fff2848c8ef804bdc8590466c36c Mon Sep 17 00:00:00 2001
+From: Wenbin Mei <wenbin.mei@mediatek.com>
+Date: Wed, 18 Nov 2020 14:34:05 +0800
+Subject: mmc: mediatek: Fix system suspend/resume support for CQHCI
+
+From: Wenbin Mei <wenbin.mei@mediatek.com>
+
+commit c0a2074ac575fff2848c8ef804bdc8590466c36c upstream.
+
+Previously, we got these errors on the MT8192 platform:
+[   59.153891] Restarting tasks ...
+[   59.154540] done.
+[   59.159175] PM: suspend exit
+[   59.218724] mtk-msdc 11f60000.mmc: phase: [map:fffffffe] [maxlen:31]
+[final:16]
+[  119.776083] mmc0: cqhci: timeout for tag 9
+[  119.780196] mmc0: cqhci: ============ CQHCI REGISTER DUMP ===========
+[  119.786709] mmc0: cqhci: Caps:      0x100020b6 | Version:  0x00000510
+[  119.793225] mmc0: cqhci: Config:    0x00000101 | Control:  0x00000000
+[  119.799706] mmc0: cqhci: Int stat:  0x00000000 | Int enab: 0x00000000
+[  119.806177] mmc0: cqhci: Int sig:   0x00000000 | Int Coal: 0x00000000
+[  119.812670] mmc0: cqhci: TDL base:  0x00000000 | TDL up32: 0x00000000
+[  119.819149] mmc0: cqhci: Doorbell:  0x003ffc00 | TCN:      0x00000200
+[  119.825656] mmc0: cqhci: Dev queue: 0x00000000 | Dev Pend: 0x00000000
+[  119.832155] mmc0: cqhci: Task clr:  0x00000000 | SSC1:     0x00001000
+[  119.838627] mmc0: cqhci: SSC2:      0x00000000 | DCMD rsp: 0x00000000
+[  119.845174] mmc0: cqhci: RED mask:  0xfdf9a080 | TERRI:    0x0000891c
+[  119.851654] mmc0: cqhci: Resp idx:  0x00000000 | Resp arg: 0x00000000
+[  119.865773] mmc0: cqhci: : ===========================================
+[  119.872358] mmc0: running CQE recovery
+From these logs, we found TDL base was back to the default value.
+
+After suspend, the mmc host is powered off by HW, which brings the CQE
+registers back to their default values. So add a system suspend/resume
+interface and bring CQE to the deactivated state before suspend; it will
+be re-enabled by the first CQE request after resume.
+
+Signed-off-by: Wenbin Mei <wenbin.mei@mediatek.com>
+Link: https://lore.kernel.org/r/20201118063405.24906-1-wenbin.mei@mediatek.com
+Fixes: 88bd652b3c74 ("mmc: mediatek: command queue support")
+Cc: stable@vger.kernel.org
+[Ulf: Renamed functions]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/mtk-sd.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2654,11 +2654,29 @@ static int msdc_runtime_resume(struct de
+       msdc_restore_reg(host);
+       return 0;
+ }
++
++static int msdc_suspend(struct device *dev)
++{
++      struct mmc_host *mmc = dev_get_drvdata(dev);
++      int ret;
++
++      if (mmc->caps2 & MMC_CAP2_CQE) {
++              ret = cqhci_suspend(mmc);
++              if (ret)
++                      return ret;
++      }
++
++      return pm_runtime_force_suspend(dev);
++}
++
++static int msdc_resume(struct device *dev)
++{
++      return pm_runtime_force_resume(dev);
++}
+ #endif
+ static const struct dev_pm_ops msdc_dev_pm_ops = {
+-      SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+-                              pm_runtime_force_resume)
++      SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
+       SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+ };
diff --git a/queue-5.9/mmc-sdhci-of-arasan-fix-clock-registration-error-for-keem-bay-soc.patch b/queue-5.9/mmc-sdhci-of-arasan-fix-clock-registration-error-for-keem-bay-soc.patch
new file mode 100644 (file)
index 0000000..12b3692
--- /dev/null
@@ -0,0 +1,53 @@
+From a42a7ec9bb99a17869c3b9f3d365aaf2bdb1a554 Mon Sep 17 00:00:00 2001
+From: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Date: Wed, 18 Nov 2020 20:01:20 +0800
+Subject: mmc: sdhci-of-arasan: Fix clock registration error for Keem Bay SOC
+
+From: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+
+commit a42a7ec9bb99a17869c3b9f3d365aaf2bdb1a554 upstream.
+
+The commit 16ada730a759 ("mmc: sdhci-of-arasan: Modify clock operations
+handling") introduced support for platform specific clock operations.
+Around the same point in time the commit 36c6aadaae86 ("mmc:
+sdhci-of-arasan: Add support for Intel Keem Bay") was also merged.
+Unfortunately, it was not really tested on top of the previously mentioned
+commit, which causes clock registration failures for Keem Bay SOC devices.
+
+Let's fix this, by properly declaring the clock operation for Keem Bay SOC
+devices.
+
+Fixes: 36c6aadaae86 ("mmc: sdhci-of-arasan: Add support for Intel Keem Bay")
+Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
+Reviewed-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20201118120120.24908-2-muhammad.husaini.zulkifli@intel.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-arasan.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -1186,16 +1186,19 @@ static struct sdhci_arasan_of_data sdhci
+ static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
+       .soc_ctl_map = &intel_keembay_soc_ctl_map,
+       .pdata = &sdhci_keembay_emmc_pdata,
++      .clk_ops = &arasan_clk_ops,
+ };
+ static struct sdhci_arasan_of_data intel_keembay_sd_data = {
+       .soc_ctl_map = &intel_keembay_soc_ctl_map,
+       .pdata = &sdhci_keembay_sd_pdata,
++      .clk_ops = &arasan_clk_ops,
+ };
+ static struct sdhci_arasan_of_data intel_keembay_sdio_data = {
+       .soc_ctl_map = &intel_keembay_soc_ctl_map,
+       .pdata = &sdhci_keembay_sdio_pdata,
++      .clk_ops = &arasan_clk_ops,
+ };
+ static const struct of_device_id sdhci_arasan_of_match[] = {
diff --git a/queue-5.9/pinctrl-amd-remove-debounce-filter-setting-in-irq-type-setting.patch b/queue-5.9/pinctrl-amd-remove-debounce-filter-setting-in-irq-type-setting.patch
new file mode 100644 (file)
index 0000000..9299d79
--- /dev/null
@@ -0,0 +1,93 @@
+From 47a0001436352c9853d72bf2071e85b316d688a2 Mon Sep 17 00:00:00 2001
+From: Coiby Xu <coiby.xu@gmail.com>
+Date: Wed, 25 Nov 2020 21:03:19 +0800
+Subject: pinctrl: amd: remove debounce filter setting in IRQ type setting
+
+From: Coiby Xu <coiby.xu@gmail.com>
+
+commit 47a0001436352c9853d72bf2071e85b316d688a2 upstream.
+
+Debounce filter setting should be independent from IRQ type setting
+because according to the ACPI specs, there are separate arguments for
+specifying debounce timeout and IRQ type in GpioIo() and GpioInt().
+
+Together with commit 06abe8291bc31839950f7d0362d9979edc88a666
+("pinctrl: amd: fix incorrect way to disable debounce filter") and
+Andy's patch "gpiolib: acpi: Take into account debounce settings" [1],
+this will fix broken touchpads for laptops whose BIOS set the
+debounce timeout to a relatively large value. For example, the BIOS
+of Lenovo AMD gaming laptops including Legion-5 15ARH05 (R7000),
+Legion-5P (R7000P) and IdeaPad Gaming 3 15ARH05, set the debounce
+timeout to 124.8ms. This led to the kernel receiving only ~7 HID
+reports per second from the Synaptics touchpad
+(MSFT0001:00 06CB:7F28).
+
+Existing touchpads like [2][3] are not troubled by this bug because
+the debounce timeout has been set to 0 by the BIOS before enabling
+the debounce filter in setting IRQ type.
+
+[1] https://lore.kernel.org/linux-gpio/20201111222008.39993-11-andriy.shevchenko@linux.intel.com/
+    8dcb7a15a585 ("gpiolib: acpi: Take into account debounce settings")
+[2] https://github.com/Syniurge/i2c-amd-mp2/issues/11#issuecomment-721331582
+[3] https://forum.manjaro.org/t/random-short-touchpad-freezes/30832/28
+
+Signed-off-by: Coiby Xu <coiby.xu@gmail.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/linux-gpio/CAHp75VcwiGREBUJ0A06EEw-SyabqYsp%2Bdqs2DpSrhaY-2GVdAA%40mail.gmail.com/
+BugLink: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1887190
+Link: https://lore.kernel.org/r/20201125130320.311059-1-coiby.xu@gmail.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/pinctrl-amd.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct
+               pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+               pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+               pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-              pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+               irq_set_handler_locked(d, handle_edge_irq);
+               break;
+@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct
+               pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+               pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+               pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-              pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+               irq_set_handler_locked(d, handle_edge_irq);
+               break;
+@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct
+               pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+               pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+               pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
+-              pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+               irq_set_handler_locked(d, handle_edge_irq);
+               break;
+@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct
+               pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+               pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+               pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-              pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-              pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
+               irq_set_handler_locked(d, handle_level_irq);
+               break;
+@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct
+               pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+               pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+               pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-              pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-              pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
+               irq_set_handler_locked(d, handle_level_irq);
+               break;
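As an illustration of the resulting split of responsibilities, a minimal
consumer-side sketch (kernel context; the device, the "touchpad" con_id, the
1000 us value and the handler are all hypothetical): debounce is requested
explicitly through gpiod_set_debounce(), while setting the IRQ trigger type
no longer implies any debounce filter configuration.

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/gpio/consumer.h>
  #include <linux/interrupt.h>

  static irqreturn_t pad_irq(int irq, void *data)
  {
          return IRQ_HANDLED;
  }

  static int pad_setup(struct device *dev)
  {
          struct gpio_desc *gpio;
          int irq, ret;

          gpio = devm_gpiod_get(dev, "touchpad", GPIOD_IN);
          if (IS_ERR(gpio))
                  return PTR_ERR(gpio);

          /* Debounce is a separate, explicit request (value in microseconds). */
          ret = gpiod_set_debounce(gpio, 1000);
          if (ret)
                  dev_warn(dev, "debounce not supported\n");

          irq = gpiod_to_irq(gpio);
          if (irq < 0)
                  return irq;

          /* The trigger type alone no longer touches the debounce filter. */
          return devm_request_irq(dev, irq, pad_irq, IRQF_TRIGGER_FALLING,
                                  "pad", NULL);
  }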
diff --git a/queue-5.9/pinctrl-jasperlake-fix-hostsw_own-offset.patch b/queue-5.9/pinctrl-jasperlake-fix-hostsw_own-offset.patch
new file mode 100644 (file)
index 0000000..8839057
--- /dev/null
@@ -0,0 +1,36 @@
+From cdd8fc2dd64e3f1b22a6636e242d0eff49c4ba22 Mon Sep 17 00:00:00 2001
+From: Evan Green <evgreen@chromium.org>
+Date: Wed, 11 Nov 2020 15:17:28 -0800
+Subject: pinctrl: jasperlake: Fix HOSTSW_OWN offset
+
+From: Evan Green <evgreen@chromium.org>
+
+commit cdd8fc2dd64e3f1b22a6636e242d0eff49c4ba22 upstream.
+
+GPIOs that attempt to use interrupts get thwarted with a message like:
+"pin 161 cannot be used as IRQ" (for instance with SD_CD). This is because
+the HOSTSW_OWN offset is incorrect, so every GPIO looks like it's
+owned by ACPI.
+
+Fixes: e278dcb7048b1 ("pinctrl: intel: Add Intel Jasper Lake pin controller support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Evan Green <evgreen@chromium.org>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-jasperlake.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
++++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
+@@ -16,7 +16,7 @@
+ #define JSL_PAD_OWN   0x020
+ #define JSL_PADCFGLOCK        0x080
+-#define JSL_HOSTSW_OWN        0x0b0
++#define JSL_HOSTSW_OWN        0x0c0
+ #define JSL_GPI_IS    0x100
+ #define JSL_GPI_IE    0x120
diff --git a/queue-5.9/proc-use-untagged_addr-for-pagemap_read-addresses.patch b/queue-5.9/proc-use-untagged_addr-for-pagemap_read-addresses.patch
new file mode 100644 (file)
index 0000000..0117104
--- /dev/null
@@ -0,0 +1,125 @@
+From 40d6366e9d86d9a67b5642040e76082fdb5bdcf9 Mon Sep 17 00:00:00 2001
+From: Miles Chen <miles.chen@mediatek.com>
+Date: Fri, 11 Dec 2020 13:36:31 -0800
+Subject: proc: use untagged_addr() for pagemap_read addresses
+
+From: Miles Chen <miles.chen@mediatek.com>
+
+commit 40d6366e9d86d9a67b5642040e76082fdb5bdcf9 upstream.
+
+When reading the pagemap entry for a tagged userspace pointer, the
+computed start_vaddr is incorrect because of the tag.
+To fix this, untag the userspace pointers in pagemap_read().
+
+Tested with 5.10-rc4; the issue is still present there.
+
+Explanation from Catalin in [1]:
+
+ "Arguably, that's a user-space bug since tagged file offsets were never
+  supported. In this case it's not even a tag at bit 56 as per the arm64
+  tagged address ABI but rather down to bit 47. You could say that the
+  problem is caused by the C library (malloc()) or whoever created the
+  tagged vaddr and passed it to this function. It's not a kernel
+  regression as we've never supported it.
+
+  Now, pagemap is a special case where the offset is usually not
+  generated as a classic file offset but rather derived by shifting a
+  user virtual address. I guess we can make a concession for pagemap
+  (only) and allow such offset with the tag at bit (56 - PAGE_SHIFT + 3)"
+
+My test code is based on [2]:
+
+A userspace pointer which has been tagged by 0xb4: 0xb400007662f541c8
+
+userspace program:
+
+  uint64 OsLayer::VirtualToPhysical(void *vaddr) {
+       uint64 frame, paddr, pfnmask, pagemask;
+       int pagesize = sysconf(_SC_PAGESIZE);
+       off64_t off = ((uintptr_t)vaddr) / pagesize * 8; // off = 0xb400007662f541c8 / pagesize * 8 = 0x5a00003b317aa0
+       int fd = open(kPagemapPath, O_RDONLY);
+       ...
+
+       if (lseek64(fd, off, SEEK_SET) != off || read(fd, &frame, 8) != 8) {
+               int err = errno;
+               string errtxt = ErrorString(err);
+               if (fd >= 0)
+                       close(fd);
+               return 0;
+       }
+  ...
+  }
+
+kernel fs/proc/task_mmu.c:
+
+  static ssize_t pagemap_read(struct file *file, char __user *buf,
+               size_t count, loff_t *ppos)
+  {
+       ...
+       src = *ppos;
+       svpfn = src / PM_ENTRY_BYTES; // svpfn == 0xb400007662f54
+       start_vaddr = svpfn << PAGE_SHIFT; // start_vaddr == 0xb400007662f54000
+       end_vaddr = mm->task_size;
+
+       /* watch out for wraparound */
+       // svpfn == 0xb400007662f54
+       // (mm->task_size >> PAGE) == 0x8000000
+       if (svpfn > mm->task_size >> PAGE_SHIFT) // the condition is true because of the tag 0xb4
+               start_vaddr = end_vaddr;
+
+       ret = 0;
+       while (count && (start_vaddr < end_vaddr)) { // we cannot visit correct entry because start_vaddr is set to end_vaddr
+               int len;
+               unsigned long end;
+               ...
+       }
+       ...
+  }
+
+[1] https://lore.kernel.org/patchwork/patch/1343258/
+[2] https://github.com/stressapptest/stressapptest/blob/master/src/os.cc#L158
+
+Link: https://lkml.kernel.org/r/20201204024347.8295-1-miles.chen@mediatek.com
+Signed-off-by: Miles Chen <miles.chen@mediatek.com>
+Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Song Bao Hua (Barry Song) <song.bao.hua@hisilicon.com>
+Cc: <stable@vger.kernel.org>   [5.4-]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/task_mmu.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1541,11 +1541,15 @@ static ssize_t pagemap_read(struct file
+       src = *ppos;
+       svpfn = src / PM_ENTRY_BYTES;
+-      start_vaddr = svpfn << PAGE_SHIFT;
+       end_vaddr = mm->task_size;
+       /* watch out for wraparound */
+-      if (svpfn > mm->task_size >> PAGE_SHIFT)
++      start_vaddr = end_vaddr;
++      if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
++              start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
++
++      /* Ensure the address is inside the task */
++      if (start_vaddr > mm->task_size)
+               start_vaddr = end_vaddr;
+       /*
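As a userspace illustration of the corrected access pattern, a stand-alone
sketch (clearing the whole top byte here is a simplification of the kernel's
untagged_addr(); the pagemap layout used — 8 bytes per entry, bit 63 =
present, bits 0-54 = PFN — is the documented /proc format):

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  static uint64_t pagemap_entry(void *vaddr)
  {
          /* Strip a potential arm64 top-byte tag before deriving the offset. */
          uint64_t addr = (uint64_t)(uintptr_t)vaddr & ~(0xffULL << 56);
          long psize = sysconf(_SC_PAGESIZE);
          off_t off = (off_t)(addr / (uint64_t)psize) * 8;
          uint64_t entry = 0;
          int fd = open("/proc/self/pagemap", O_RDONLY);

          if (fd < 0)
                  return 0;
          if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
                  entry = 0;
          close(fd);
          return entry;           /* bit 63 = present, bits 0-54 = PFN */
  }

  int main(void)
  {
          int x = 0;

          printf("pagemap entry: 0x%llx\n",
                 (unsigned long long)pagemap_entry(&x));
          return 0;
  }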
diff --git a/queue-5.9/scsi-be2iscsi-revert-fix-a-theoretical-leak-in-beiscsi_create_eqs.patch b/queue-5.9/scsi-be2iscsi-revert-fix-a-theoretical-leak-in-beiscsi_create_eqs.patch
new file mode 100644 (file)
index 0000000..0674853
--- /dev/null
@@ -0,0 +1,61 @@
+From eeaf06af6f87e1dba371fbe42674e6f963220b9c Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 3 Dec 2020 15:18:26 +0300
+Subject: scsi: be2iscsi: Revert "Fix a theoretical leak in beiscsi_create_eqs()"
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit eeaf06af6f87e1dba371fbe42674e6f963220b9c upstream.
+
+My patch caused kernel Oopses and delays in boot.  Revert it.
+
+The problem was that I moved the "mem->dma = paddr;" before the call to
+be_fill_queue().  But the first thing that the be_fill_queue() function
+does is memset the whole struct to zero which overwrites the assignment.
+
+Link: https://lore.kernel.org/r/X8jXkt6eThjyVP1v@mwanda
+Fixes: 38b2db564d9a ("scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()")
+Cc: stable <stable@vger.kernel.org>
+Reported-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/be2iscsi/be_main.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -3020,7 +3020,6 @@ static int beiscsi_create_eqs(struct bei
+                       goto create_eq_error;
+               }
+-              mem->dma = paddr;
+               mem->va = eq_vaddress;
+               ret = be_fill_queue(eq, phba->params.num_eq_entries,
+                                   sizeof(struct be_eq_entry), eq_vaddress);
+@@ -3030,6 +3029,7 @@ static int beiscsi_create_eqs(struct bei
+                       goto create_eq_error;
+               }
++              mem->dma = paddr;
+               ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+                                           BEISCSI_EQ_DELAY_DEF);
+               if (ret) {
+@@ -3086,7 +3086,6 @@ static int beiscsi_create_cqs(struct bei
+                       goto create_cq_error;
+               }
+-              mem->dma = paddr;
+               ret = be_fill_queue(cq, phba->params.num_cq_entries,
+                                   sizeof(struct sol_cqe), cq_vaddress);
+               if (ret) {
+@@ -3096,6 +3095,7 @@ static int beiscsi_create_cqs(struct bei
+                       goto create_cq_error;
+               }
++              mem->dma = paddr;
+               ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+                                           false, 0);
+               if (ret) {
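The ordering problem described above, reduced to a stand-alone sketch
(structure and helper names invented): an init helper that memsets the
descriptor erases any field assigned before it runs, so the assignment must
come after the call.

  #include <stdio.h>
  #include <string.h>

  struct mem_desc {
          void *va;
          unsigned long dma;
  };

  /* Stands in for be_fill_queue(): zeroes the descriptor, then fills it. */
  static void fill_queue(struct mem_desc *m, void *va)
  {
          memset(m, 0, sizeof(*m));
          m->va = va;
  }

  int main(void)
  {
          struct mem_desc m;
          char buf[16];

          m.dma = 0x1000;         /* assigned too early ...          */
          fill_queue(&m, buf);    /* ... and wiped out by the memset */
          printf("early assignment: dma = %#lx\n", m.dma);

          fill_queue(&m, buf);
          m.dma = 0x1000;         /* assigned after the helper: kept */
          printf("late assignment:  dma = %#lx\n", m.dma);
          return 0;
  }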
index aecec48d80fd68e4b3e814c55ac507952e7891a5..38cd5d1338d5a4a7829184f7efd6b64a2c341205 100644 (file)
@@ -73,3 +73,32 @@ platform-x86-touchscreen_dmi-add-info-for-the-irbis-.patch
 can-m_can-m_can_dev_setup-add-support-for-bosch-mcan.patch
 s390-fix-irq-state-tracing.patch
 intel_idle-build-fix.patch
+media-pulse8-cec-fix-duplicate-free-at-disconnect-or-probe-error.patch
+media-pulse8-cec-add-support-for-fw-v10-and-up.patch
+mmc-mediatek-fix-system-suspend-resume-support-for-cqhci.patch
+mmc-mediatek-extend-recheck_sdio_irq-fix-to-more-variants.patch
+ktest.pl-fix-incorrect-reboot-for-grub2bls.patch
+xen-add-helpers-for-caching-grant-mapping-pages.patch
+xen-don-t-use-page-lru-for-zone_device-memory.patch
+input-cm109-do-not-stomp-on-control-urb.patch
+input-i8042-add-acer-laptops-to-the-i8042-reset-list.patch
+pinctrl-jasperlake-fix-hostsw_own-offset.patch
+pinctrl-amd-remove-debounce-filter-setting-in-irq-type-setting.patch
+mmc-sdhci-of-arasan-fix-clock-registration-error-for-keem-bay-soc.patch
+mmc-block-fixup-condition-for-cmd13-polling-for-rpmb-requests.patch
+drm-amdgpu-disply-set-num_crtc-earlier.patch
+drm-i915-gem-propagate-error-from-cancelled-submit-due-to-context-closure.patch
+drm-i915-display-dp-compute-the-correct-slice-count-for-vdsc-on-dp.patch
+drm-i915-gt-declare-gen9-has-64-mocs-entries.patch
+drm-i915-gt-ignore-repeated-attempts-to-suspend-request-flow-across-reset.patch
+drm-i915-gt-cancel-the-preemption-timeout-on-responding-to-it.patch
+drm-amdgpu-fix-sdma-instance-fw-version-and-feature-version-init.patch
+kbuild-avoid-static_assert-for-genksyms.patch
+proc-use-untagged_addr-for-pagemap_read-addresses.patch
+mm-hugetlb-clear-compound_nr-before-freeing-gigantic-pages.patch
+zonefs-fix-page-reference-and-bio-leak.patch
+scsi-be2iscsi-revert-fix-a-theoretical-leak-in-beiscsi_create_eqs.patch
+x86-mm-mem_encrypt-fix-definition-of-pmd_flags_dec_wp.patch
+x86-membarrier-get-rid-of-a-dubious-optimization.patch
+x86-apic-vector-fix-ordering-in-vector-assignment.patch
+x86-kprobes-fix-optprobe-to-detect-int3-padding-correctly.patch
diff --git a/queue-5.9/x86-apic-vector-fix-ordering-in-vector-assignment.patch b/queue-5.9/x86-apic-vector-fix-ordering-in-vector-assignment.patch
new file mode 100644 (file)
index 0000000..85f9fc5
--- /dev/null
@@ -0,0 +1,90 @@
+From 190113b4c6531c8e09b31d5235f9b5175cbb0f72 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 Dec 2020 21:18:22 +0100
+Subject: x86/apic/vector: Fix ordering in vector assignment
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 190113b4c6531c8e09b31d5235f9b5175cbb0f72 upstream.
+
+Prarit reported that depending on the affinity setting the
+
+ ' irq $N: Affinity broken due to vector space exhaustion.'
+
+message is showing up in dmesg, but the vector space on the CPUs in the
+affinity mask is definitely not exhausted.
+
+Shung-Hsi provided traces and analysis which pinpoints the problem:
+
+The ordering of trying to assign an interrupt vector in
+assign_irq_vector_any_locked() is simply wrong if the interrupt data has a
+valid node assigned. It does:
+
+ 1) Try the intersection of affinity mask and node mask
+ 2) Try the node mask
+ 3) Try the full affinity mask
+ 4) Try the full online mask
+
+Obviously #2 and #3 are in the wrong order as the requested affinity
+mask has to take precedence.
+
+In the observed cases #1 failed because the affinity mask did not contain
+CPUs from node 0. That made it allocate a vector from node 0, thereby
+breaking affinity and emitting the misleading message.
+
+Reverse the order of #2 and #3 so the full affinity mask without the node
+intersection is tried before affinity is actually broken.
+
+If no node is assigned then only the full affinity mask and if that fails
+the full online mask is tried.
+
+Fixes: d6ffc6ac83b1 ("x86/vector: Respect affinity mask in irq descriptor")
+Reported-by: Prarit Bhargava <prarit@redhat.com>
+Reported-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/87ft4djtyp.fsf@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c |   24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -273,20 +273,24 @@ static int assign_irq_vector_any_locked(
+       const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+       int node = irq_data_get_node(irqd);
+-      if (node == NUMA_NO_NODE)
+-              goto all;
+-      /* Try the intersection of @affmsk and node mask */
+-      cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+-      if (!assign_vector_locked(irqd, vector_searchmask))
+-              return 0;
+-      /* Try the node mask */
+-      if (!assign_vector_locked(irqd, cpumask_of_node(node)))
+-              return 0;
+-all:
++      if (node != NUMA_NO_NODE) {
++              /* Try the intersection of @affmsk and node mask */
++              cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
++              if (!assign_vector_locked(irqd, vector_searchmask))
++                      return 0;
++      }
++
+       /* Try the full affinity mask */
+       cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
+       if (!assign_vector_locked(irqd, vector_searchmask))
+               return 0;
++
++      if (node != NUMA_NO_NODE) {
++              /* Try the node mask */
++              if (!assign_vector_locked(irqd, cpumask_of_node(node)))
++                      return 0;
++      }
++
+       /* Try the full online mask */
+       return assign_vector_locked(irqd, cpu_online_mask);
+ }
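A small stand-alone sketch of why the ordering matters (CPU sets modelled as
plain bitmasks; the node/affinity layout is made up): trying the node mask
before the full affinity mask can place the vector on a CPU outside the
requested affinity even though affinity CPUs are still available.

  #include <stdio.h>

  /* Lowest set bit, i.e. the "CPU" a vector would land on; -1 if none. */
  static int first_cpu(unsigned int mask)
  {
          for (int i = 0; i < 32; i++)
                  if (mask & (1u << i))
                          return i;
          return -1;
  }

  int main(void)
  {
          unsigned int affinity = 0xf0;   /* requested CPUs 4-7        */
          unsigned int node     = 0x0f;   /* irq's home node: CPUs 0-3 */
          unsigned int online   = 0xff;

          /* Step 1 in both orderings: affinity & node -- empty here. */
          unsigned int both = affinity & node;

          /* Old order: node mask is tried before the full affinity mask. */
          int old_pick = first_cpu(both ? both : node);
          /* New order: full affinity mask is tried before the node mask. */
          int new_pick = first_cpu(both ? both : (affinity & online));

          printf("old order -> CPU %d (outside the affinity mask)\n", old_pick);
          printf("new order -> CPU %d (inside the affinity mask)\n", new_pick);
          return 0;
  }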
diff --git a/queue-5.9/x86-kprobes-fix-optprobe-to-detect-int3-padding-correctly.patch b/queue-5.9/x86-kprobes-fix-optprobe-to-detect-int3-padding-correctly.patch
new file mode 100644 (file)
index 0000000..00628f8
--- /dev/null
@@ -0,0 +1,77 @@
+From 0d07c0ec4381f630c801539c79ad8dcc627f6e4a Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 11 Dec 2020 16:04:17 +0900
+Subject: x86/kprobes: Fix optprobe to detect INT3 padding correctly
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 0d07c0ec4381f630c801539c79ad8dcc627f6e4a upstream.
+
+Commit
+
+  7705dc855797 ("x86/vmlinux: Use INT3 instead of NOP for linker fill bytes")
+
+changed the padding bytes between functions from NOP to INT3. However,
+when optprobe decodes a target function it finds INT3 and gives up the
+jump optimization.
+
+Instead of giving up on any detected INT3, check whether the rest of the
+bytes up to the end of the function are also INT3. If they all are, they
+come from the linker, so continue with the optprobe jump optimization.
+
+ [ bp: Massage commit message. ]
+
+Fixes: 7705dc855797 ("x86/vmlinux: Use INT3 instead of NOP for linker fill bytes")
+Reported-by: Adam Zabrocki <pi3@pi3.com.pl>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/160767025681.3880685.16021570341428835411.stgit@devnote2
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/opt.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -271,6 +271,19 @@ static int insn_is_indirect_jump(struct
+       return ret;
+ }
++static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
++{
++      unsigned char ops;
++
++      for (; addr < eaddr; addr++) {
++              if (get_kernel_nofault(ops, (void *)addr) < 0 ||
++                  ops != INT3_INSN_OPCODE)
++                      return false;
++      }
++
++      return true;
++}
++
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
+@@ -309,9 +322,14 @@ static int can_optimize(unsigned long pa
+                       return 0;
+               kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
+               insn_get_length(&insn);
+-              /* Another subsystem puts a breakpoint */
++              /*
++               * In the case of detecting unknown breakpoint, this could be
++               * a padding INT3 between functions. Let's check that all the
++               * rest of the bytes are also INT3.
++               */
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+-                      return 0;
++                      return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
++
+               /* Recover address */
+               insn.kaddr = (void *)addr;
+               insn.next_byte = (void *)(addr + insn.length);
diff --git a/queue-5.9/x86-membarrier-get-rid-of-a-dubious-optimization.patch b/queue-5.9/x86-membarrier-get-rid-of-a-dubious-optimization.patch
new file mode 100644 (file)
index 0000000..dbecacc
--- /dev/null
@@ -0,0 +1,70 @@
+From a493d1ca1a03b532871f1da27f8dbda2b28b04c4 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 3 Dec 2020 21:07:03 -0800
+Subject: x86/membarrier: Get rid of a dubious optimization
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit a493d1ca1a03b532871f1da27f8dbda2b28b04c4 upstream.
+
+sync_core_before_usermode() had an incorrect optimization.  If the kernel
+returns from an interrupt, it can get to usermode without IRET. It just has
+to schedule to a different task in the same mm and do SYSRET.  Fortunately,
+there were no callers of sync_core_before_usermode() that could have had
+in_irq() or in_nmi() equal to true, because it's only ever called from the
+scheduler.
+
+While at it, clarify a related comment.
+
+Fixes: 70216e18e519 ("membarrier: Provide core serializing command, *_SYNC_CORE")
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/5afc7632be1422f91eaf7611aaaa1b5b8580a086.1607058304.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/sync_core.h |    9 +++++----
+ arch/x86/mm/tlb.c                |   10 ++++++++--
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -88,12 +88,13 @@ static inline void sync_core_before_user
+       /* With PTI, we unconditionally serialize before running user code. */
+       if (static_cpu_has(X86_FEATURE_PTI))
+               return;
++
+       /*
+-       * Return from interrupt and NMI is done through iret, which is core
+-       * serializing.
++       * Even if we're in an interrupt, we might reschedule before returning,
++       * in which case we could switch to a different thread in the same mm
++       * and return using SYSRET or SYSEXIT.  Instead of trying to keep
++       * track of our need to sync the core, just sync right away.
+        */
+-      if (in_irq() || in_nmi())
+-              return;
+       sync_core();
+ }
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -475,8 +475,14 @@ void switch_mm_irqs_off(struct mm_struct
+       /*
+        * The membarrier system call requires a full memory barrier and
+        * core serialization before returning to user-space, after
+-       * storing to rq->curr. Writing to CR3 provides that full
+-       * memory barrier and core serializing instruction.
++       * storing to rq->curr, when changing mm.  This is because
++       * membarrier() sends IPIs to all CPUs that are in the target mm
++       * to make them issue memory barriers.  However, if another CPU
++       * switches to/from the target mm concurrently with
++       * membarrier(), it can cause that CPU not to receive an IPI
++       * when it really should issue a memory barrier.  Writing to CR3
++       * provides that full memory barrier and core serializing
++       * instruction.
+        */
+       if (real_prev == next) {
+               VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
diff --git a/queue-5.9/x86-mm-mem_encrypt-fix-definition-of-pmd_flags_dec_wp.patch b/queue-5.9/x86-mm-mem_encrypt-fix-definition-of-pmd_flags_dec_wp.patch
new file mode 100644 (file)
index 0000000..add3374
--- /dev/null
@@ -0,0 +1,53 @@
+From 29ac40cbed2bc06fa218ca25d7f5e280d3d08a25 Mon Sep 17 00:00:00 2001
+From: Arvind Sankar <nivedita@alum.mit.edu>
+Date: Wed, 11 Nov 2020 11:09:45 -0500
+Subject: x86/mm/mem_encrypt: Fix definition of PMD_FLAGS_DEC_WP
+
+From: Arvind Sankar <nivedita@alum.mit.edu>
+
+commit 29ac40cbed2bc06fa218ca25d7f5e280d3d08a25 upstream.
+
+The PAT bit is in different locations for 4k and 2M/1G page table
+entries.
+
+Add a definition for _PAGE_LARGE_CACHE_MASK to represent the three
+caching bits (PWT, PCD, PAT), similar to _PAGE_CACHE_MASK for 4k pages,
+and use it in the definition of PMD_FLAGS_DEC_WP to get the correct PAT
+index for write-protected pages.
+
+Fixes: 6ebcb060713f ("x86/mm: Add support to encrypt the kernel in-place")
+Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20201111160946.147341-1-nivedita@alum.mit.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pgtable_types.h |    1 +
+ arch/x86/mm/mem_encrypt_identity.c   |    4 ++--
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -155,6 +155,7 @@ enum page_cache_mode {
+ #define _PAGE_ENC             (_AT(pteval_t, sme_me_mask))
+ #define _PAGE_CACHE_MASK      (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
++#define _PAGE_LARGE_CACHE_MASK        (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
+ #define _PAGE_NOCACHE         (cachemode2protval(_PAGE_CACHE_MODE_UC))
+ #define _PAGE_CACHE_WP                (cachemode2protval(_PAGE_CACHE_MODE_WP))
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -45,8 +45,8 @@
+ #define PMD_FLAGS_LARGE               (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+ #define PMD_FLAGS_DEC         PMD_FLAGS_LARGE
+-#define PMD_FLAGS_DEC_WP      ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+-                               (_PAGE_PAT | _PAGE_PWT))
++#define PMD_FLAGS_DEC_WP      ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
++                               (_PAGE_PAT_LARGE | _PAGE_PWT))
+ #define PMD_FLAGS_ENC         (PMD_FLAGS_LARGE | _PAGE_ENC)
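For reference, the bit positions behind the fix (values as defined in the
kernel's pgtable_types.h): bit 7 is PAT in a 4K PTE but is the PS (page size)
bit in a 2M/1G entry, so the PAT bit for large pages sits at bit 12.

  /* 4K PTE:      PWT = bit 3, PCD = bit 4, PAT = bit 7
   * 2M/1G entry: PWT = bit 3, PCD = bit 4, bit 7 = PS (page size),
   *              PAT = bit 12
   */
  #define _PAGE_BIT_PWT           3
  #define _PAGE_BIT_PCD           4
  #define _PAGE_BIT_PAT           7       /* PAT for 4K pages    */
  #define _PAGE_BIT_PAT_LARGE     12      /* PAT for 2M/1G pages */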
diff --git a/queue-5.9/xen-add-helpers-for-caching-grant-mapping-pages.patch b/queue-5.9/xen-add-helpers-for-caching-grant-mapping-pages.patch
new file mode 100644 (file)
index 0000000..d8e3901
--- /dev/null
@@ -0,0 +1,467 @@
+From ca33479cc7be2c9b5f8be078c8bf3ac26b7d6186 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 7 Dec 2020 08:31:22 +0100
+Subject: xen: add helpers for caching grant mapping pages
+
+From: Juergen Gross <jgross@suse.com>
+
+commit ca33479cc7be2c9b5f8be078c8bf3ac26b7d6186 upstream.
+
+Instead of having similar helpers in multiple backend drivers, use
+common helpers for caching pages allocated via gnttab_alloc_pages().
+
+Make use of those helpers in blkback and scsiback.
+
+Cc: <stable@vger.kernel.org> # 5.9
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovksy@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/xen-blkback/blkback.c |   89 ++++++------------------------------
+ drivers/block/xen-blkback/common.h  |    4 -
+ drivers/block/xen-blkback/xenbus.c  |    6 --
+ drivers/xen/grant-table.c           |   72 +++++++++++++++++++++++++++++
+ drivers/xen/xen-scsiback.c          |   60 ++++--------------------
+ include/xen/grant_table.h           |   13 +++++
+ 6 files changed, 116 insertions(+), 128 deletions(-)
+
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
+ #define BLKBACK_INVALID_HANDLE (~0)
+-/* Number of free pages to remove on each call to gnttab_free_pages */
+-#define NUM_BATCH_FREE_PAGES 10
+-
+ static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+ {
+       return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+                       HZ * pgrant_timeout);
+ }
+-static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ring->free_pages_lock, flags);
+-      if (list_empty(&ring->free_pages)) {
+-              BUG_ON(ring->free_pages_num != 0);
+-              spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-              return gnttab_alloc_pages(1, page);
+-      }
+-      BUG_ON(ring->free_pages_num == 0);
+-      page[0] = list_first_entry(&ring->free_pages, struct page, lru);
+-      list_del(&page[0]->lru);
+-      ring->free_pages_num--;
+-      spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-
+-      return 0;
+-}
+-
+-static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
+-                                  int num)
+-{
+-      unsigned long flags;
+-      int i;
+-
+-      spin_lock_irqsave(&ring->free_pages_lock, flags);
+-      for (i = 0; i < num; i++)
+-              list_add(&page[i]->lru, &ring->free_pages);
+-      ring->free_pages_num += num;
+-      spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-}
+-
+-static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
+-{
+-      /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
+-      struct page *page[NUM_BATCH_FREE_PAGES];
+-      unsigned int num_pages = 0;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ring->free_pages_lock, flags);
+-      while (ring->free_pages_num > num) {
+-              BUG_ON(list_empty(&ring->free_pages));
+-              page[num_pages] = list_first_entry(&ring->free_pages,
+-                                                 struct page, lru);
+-              list_del(&page[num_pages]->lru);
+-              ring->free_pages_num--;
+-              if (++num_pages == NUM_BATCH_FREE_PAGES) {
+-                      spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-                      gnttab_free_pages(num_pages, page);
+-                      spin_lock_irqsave(&ring->free_pages_lock, flags);
+-                      num_pages = 0;
+-              }
+-      }
+-      spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-      if (num_pages != 0)
+-              gnttab_free_pages(num_pages, page);
+-}
+-
+ #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+ static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
+@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct
+                       unmap_data.count = segs_to_unmap;
+                       BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-                      put_free_pages(ring, pages, segs_to_unmap);
++                      gnttab_page_cache_put(&ring->free_pages, pages,
++                                            segs_to_unmap);
+                       segs_to_unmap = 0;
+               }
+@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struc
+               if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+                       unmap_data.count = segs_to_unmap;
+                       BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-                      put_free_pages(ring, pages, segs_to_unmap);
++                      gnttab_page_cache_put(&ring->free_pages, pages,
++                                            segs_to_unmap);
+                       segs_to_unmap = 0;
+               }
+               kfree(persistent_gnt);
+@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struc
+       if (segs_to_unmap > 0) {
+               unmap_data.count = segs_to_unmap;
+               BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-              put_free_pages(ring, pages, segs_to_unmap);
++              gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
+       }
+ }
+@@ -664,9 +605,10 @@ purge_gnt_list:
+               /* Shrink the free pages pool if it is too large. */
+               if (time_before(jiffies, blkif->buffer_squeeze_end))
+-                      shrink_free_pagepool(ring, 0);
++                      gnttab_page_cache_shrink(&ring->free_pages, 0);
+               else
+-                      shrink_free_pagepool(ring, max_buffer_pages);
++                      gnttab_page_cache_shrink(&ring->free_pages,
++                                               max_buffer_pages);
+               if (log_stats && time_after(jiffies, ring->st_print))
+                       print_stats(ring);
+@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_bl
+       ring->persistent_gnt_c = 0;
+       /* Since we are shutting down remove all pages from the buffer */
+-      shrink_free_pagepool(ring, 0 /* All */);
++      gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
+ }
+ static unsigned int xen_blkbk_unmap_prepare(
+@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_
+          but is this the best way to deal with this? */
+       BUG_ON(result);
+-      put_free_pages(ring, data->pages, data->count);
++      gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
+       make_response(ring, pending_req->id,
+                     pending_req->operation, pending_req->status);
+       free_req(ring, pending_req);
+@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_b
+               if (invcount) {
+                       ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+                       BUG_ON(ret);
+-                      put_free_pages(ring, unmap_pages, invcount);
++                      gnttab_page_cache_put(&ring->free_pages, unmap_pages,
++                                            invcount);
+               }
+               pages += batch;
+               num -= batch;
+@@ -850,7 +793,8 @@ again:
+                       pages[i]->page = persistent_gnt->page;
+                       pages[i]->persistent_gnt = persistent_gnt;
+               } else {
+-                      if (get_free_page(ring, &pages[i]->page))
++                      if (gnttab_page_cache_get(&ring->free_pages,
++                                                &pages[i]->page))
+                               goto out_of_memory;
+                       addr = vaddr(pages[i]->page);
+                       pages_to_gnt[segs_to_map] = pages[i]->page;
+@@ -883,7 +827,8 @@ again:
+                       BUG_ON(new_map_idx >= segs_to_map);
+                       if (unlikely(map[new_map_idx].status != 0)) {
+                               pr_debug("invalid buffer -- could not remap it\n");
+-                              put_free_pages(ring, &pages[seg_idx]->page, 1);
++                              gnttab_page_cache_put(&ring->free_pages,
++                                                    &pages[seg_idx]->page, 1);
+                               pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+                               ret |= 1;
+                               goto next;
+@@ -944,7 +889,7 @@ next:
+ out_of_memory:
+       pr_alert("%s: out of memory\n", __func__);
+-      put_free_pages(ring, pages_to_gnt, segs_to_map);
++      gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
+       for (i = last_map; i < num; i++)
+               pages[i]->handle = BLKBACK_INVALID_HANDLE;
+       return -ENOMEM;
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -288,9 +288,7 @@ struct xen_blkif_ring {
+       struct work_struct      persistent_purge_work;
+       /* Buffer of free pages to map grant refs. */
+-      spinlock_t              free_pages_lock;
+-      int                     free_pages_num;
+-      struct list_head        free_pages;
++      struct gnttab_page_cache free_pages;
+       struct work_struct      free_work;
+       /* Thread shutdown wait queue. */
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct
+               INIT_LIST_HEAD(&ring->pending_free);
+               INIT_LIST_HEAD(&ring->persistent_purge_list);
+               INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
+-              spin_lock_init(&ring->free_pages_lock);
+-              INIT_LIST_HEAD(&ring->free_pages);
++              gnttab_page_cache_init(&ring->free_pages);
+               spin_lock_init(&ring->pending_free_lock);
+               init_waitqueue_head(&ring->pending_free_wq);
+@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct x
+               BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
+               BUG_ON(!list_empty(&ring->persistent_purge_list));
+               BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+-              BUG_ON(!list_empty(&ring->free_pages));
+-              BUG_ON(ring->free_pages_num != 0);
++              BUG_ON(ring->free_pages.num_pages != 0);
+               BUG_ON(ring->persistent_gnt_c != 0);
+               WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+               ring->active = false;
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -813,6 +813,78 @@ int gnttab_alloc_pages(int nr_pages, str
+ }
+ EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
++void gnttab_page_cache_init(struct gnttab_page_cache *cache)
++{
++      spin_lock_init(&cache->lock);
++      INIT_LIST_HEAD(&cache->pages);
++      cache->num_pages = 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
++
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&cache->lock, flags);
++
++      if (list_empty(&cache->pages)) {
++              spin_unlock_irqrestore(&cache->lock, flags);
++              return gnttab_alloc_pages(1, page);
++      }
++
++      page[0] = list_first_entry(&cache->pages, struct page, lru);
++      list_del(&page[0]->lru);
++      cache->num_pages--;
++
++      spin_unlock_irqrestore(&cache->lock, flags);
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
++
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++                         unsigned int num)
++{
++      unsigned long flags;
++      unsigned int i;
++
++      spin_lock_irqsave(&cache->lock, flags);
++
++      for (i = 0; i < num; i++)
++              list_add(&page[i]->lru, &cache->pages);
++      cache->num_pages += num;
++
++      spin_unlock_irqrestore(&cache->lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
++
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
++{
++      struct page *page[10];
++      unsigned int i = 0;
++      unsigned long flags;
++
++      spin_lock_irqsave(&cache->lock, flags);
++
++      while (cache->num_pages > num) {
++              page[i] = list_first_entry(&cache->pages, struct page, lru);
++              list_del(&page[i]->lru);
++              cache->num_pages--;
++              if (++i == ARRAY_SIZE(page)) {
++                      spin_unlock_irqrestore(&cache->lock, flags);
++                      gnttab_free_pages(i, page);
++                      i = 0;
++                      spin_lock_irqsave(&cache->lock, flags);
++              }
++      }
++
++      spin_unlock_irqrestore(&cache->lock, flags);
++
++      if (i != 0)
++              gnttab_free_pages(i, page);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
++
+ void gnttab_pages_clear_private(int nr_pages, struct page **pages)
+ {
+       int i;
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -99,6 +99,8 @@ struct vscsibk_info {
+       struct list_head v2p_entry_lists;
+       wait_queue_head_t waiting_to_free;
++
++      struct gnttab_page_cache free_pages;
+ };
+ /* theoretical maximum of grants for one request */
+@@ -188,10 +190,6 @@ module_param_named(max_buffer_pages, scs
+ MODULE_PARM_DESC(max_buffer_pages,
+ "Maximum number of free pages to keep in backend buffer");
+-static DEFINE_SPINLOCK(free_pages_lock);
+-static int free_pages_num;
+-static LIST_HEAD(scsiback_free_pages);
+-
+ /* Global spinlock to protect scsiback TPG list */
+ static DEFINE_MUTEX(scsiback_mutex);
+ static LIST_HEAD(scsiback_list);
+@@ -207,41 +205,6 @@ static void scsiback_put(struct vscsibk_
+               wake_up(&info->waiting_to_free);
+ }
+-static void put_free_pages(struct page **page, int num)
+-{
+-      unsigned long flags;
+-      int i = free_pages_num + num, n = num;
+-
+-      if (num == 0)
+-              return;
+-      if (i > scsiback_max_buffer_pages) {
+-              n = min(num, i - scsiback_max_buffer_pages);
+-              gnttab_free_pages(n, page + num - n);
+-              n = num - n;
+-      }
+-      spin_lock_irqsave(&free_pages_lock, flags);
+-      for (i = 0; i < n; i++)
+-              list_add(&page[i]->lru, &scsiback_free_pages);
+-      free_pages_num += n;
+-      spin_unlock_irqrestore(&free_pages_lock, flags);
+-}
+-
+-static int get_free_page(struct page **page)
+-{
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&free_pages_lock, flags);
+-      if (list_empty(&scsiback_free_pages)) {
+-              spin_unlock_irqrestore(&free_pages_lock, flags);
+-              return gnttab_alloc_pages(1, page);
+-      }
+-      page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
+-      list_del(&page[0]->lru);
+-      free_pages_num--;
+-      spin_unlock_irqrestore(&free_pages_lock, flags);
+-      return 0;
+-}
+-
+ static unsigned long vaddr_page(struct page *page)
+ {
+       unsigned long pfn = page_to_pfn(page);
+@@ -302,7 +265,8 @@ static void scsiback_fast_flush_area(str
+               BUG_ON(err);
+       }
+-      put_free_pages(req->pages, req->n_grants);
++      gnttab_page_cache_put(&req->info->free_pages, req->pages,
++                            req->n_grants);
+       req->n_grants = 0;
+ }
+@@ -445,8 +409,8 @@ static int scsiback_gnttab_data_map_list
+       struct vscsibk_info *info = pending_req->info;
+       for (i = 0; i < cnt; i++) {
+-              if (get_free_page(pg + mapcount)) {
+-                      put_free_pages(pg, mapcount);
++              if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
++                      gnttab_page_cache_put(&info->free_pages, pg, mapcount);
+                       pr_err("no grant page\n");
+                       return -ENOMEM;
+               }
+@@ -796,6 +760,8 @@ static int scsiback_do_cmd_fn(struct vsc
+               cond_resched();
+       }
++      gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);
++
+       RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+       return more_to_do;
+ }
+@@ -1233,6 +1199,8 @@ static int scsiback_remove(struct xenbus
+       scsiback_release_translation_entry(info);
++      gnttab_page_cache_shrink(&info->free_pages, 0);
++
+       dev_set_drvdata(&dev->dev, NULL);
+       return 0;
+@@ -1263,6 +1231,7 @@ static int scsiback_probe(struct xenbus_
+       info->irq = 0;
+       INIT_LIST_HEAD(&info->v2p_entry_lists);
+       spin_lock_init(&info->v2p_lock);
++      gnttab_page_cache_init(&info->free_pages);
+       err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
+                           SG_ALL);
+@@ -1879,13 +1848,6 @@ out:
+ static void __exit scsiback_exit(void)
+ {
+-      struct page *page;
+-
+-      while (free_pages_num) {
+-              if (get_free_page(&page))
+-                      BUG();
+-              gnttab_free_pages(1, &page);
+-      }
+       target_unregister_template(&scsiback_ops);
+       xenbus_unregister_driver(&scsiback_driver);
+ }
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -198,6 +198,19 @@ void gnttab_free_auto_xlat_frames(void);
+ int gnttab_alloc_pages(int nr_pages, struct page **pages);
+ void gnttab_free_pages(int nr_pages, struct page **pages);
++struct gnttab_page_cache {
++      spinlock_t              lock;
++      struct list_head        pages;
++      unsigned int            num_pages;
++};
++
++void gnttab_page_cache_init(struct gnttab_page_cache *cache);
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++                         unsigned int num);
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
++                            unsigned int num);
++
+ #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ struct gnttab_dma_alloc_args {
+       /* Device for which DMA memory will be/was allocated. */
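A minimal usage sketch of the new helpers from a backend's point of view
(kernel context; the ring structure and the 1024-page cap are hypothetical),
matching the API added to include/xen/grant_table.h above:

  #include <xen/grant_table.h>

  struct my_ring {
          struct gnttab_page_cache free_pages;    /* hypothetical backend state */
  };

  static void my_ring_init(struct my_ring *ring)
  {
          gnttab_page_cache_init(&ring->free_pages);
  }

  static int my_map_one(struct my_ring *ring, struct page **page)
  {
          /* Reuse a cached page if possible, else gnttab_alloc_pages(1, page). */
          return gnttab_page_cache_get(&ring->free_pages, page);
  }

  static void my_unmap_done(struct my_ring *ring, struct page **pages,
                            unsigned int num)
  {
          gnttab_page_cache_put(&ring->free_pages, pages, num);
          /* Keep at most 1024 cached pages; 0 releases everything (teardown). */
          gnttab_page_cache_shrink(&ring->free_pages, 1024);
  }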
diff --git a/queue-5.9/xen-don-t-use-page-lru-for-zone_device-memory.patch b/queue-5.9/xen-don-t-use-page-lru-for-zone_device-memory.patch
new file mode 100644 (file)
index 0000000..64ca38e
--- /dev/null
@@ -0,0 +1,214 @@
+From ee32f32335e8c7f6154bf397f4ac9b6175b488a8 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 7 Dec 2020 09:36:14 +0100
+Subject: xen: don't use page->lru for ZONE_DEVICE memory
+
+From: Juergen Gross <jgross@suse.com>
+
+commit ee32f32335e8c7f6154bf397f4ac9b6175b488a8 upstream.
+
+Commit 9e2369c06c8a18 ("xen: add helpers to allocate unpopulated
+memory") introduced usage of ZONE_DEVICE memory for foreign memory
+mappings.
+
+Unfortunately this collides with using page->lru for Xen backend
+private page caches.
+
+Fix that by using page->zone_device_data instead.
+
+Cc: <stable@vger.kernel.org> # 5.9
+Fixes: 9e2369c06c8a18 ("xen: add helpers to allocate unpopulated memory")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovksy@oracle.com>
+Reviewed-by: Jason Andryuk <jandryuk@gmail.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/grant-table.c       |   65 +++++++++++++++++++++++++++++++++++-----
+ drivers/xen/unpopulated-alloc.c |   20 ++++++------
+ include/xen/grant_table.h       |    4 ++
+ 3 files changed, 73 insertions(+), 16 deletions(-)
+
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -813,10 +813,63 @@ int gnttab_alloc_pages(int nr_pages, str
+ }
+ EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++      cache->pages = NULL;
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++      return !cache->pages;
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++      struct page *page;
++
++      page = cache->pages;
++      cache->pages = page->zone_device_data;
++
++      return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++      page->zone_device_data = cache->pages;
++      cache->pages = page;
++}
++#else
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++      INIT_LIST_HEAD(&cache->pages);
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++      return list_empty(&cache->pages);
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++      struct page *page;
++
++      page = list_first_entry(&cache->pages, struct page, lru);
++      list_del(&page->lru);
++
++      return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++      list_add(&page->lru, &cache->pages);
++}
++#endif
++
+ void gnttab_page_cache_init(struct gnttab_page_cache *cache)
+ {
+       spin_lock_init(&cache->lock);
+-      INIT_LIST_HEAD(&cache->pages);
++      cache_init(cache);
+       cache->num_pages = 0;
+ }
+ EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
+@@ -827,13 +880,12 @@ int gnttab_page_cache_get(struct gnttab_
+       spin_lock_irqsave(&cache->lock, flags);
+-      if (list_empty(&cache->pages)) {
++      if (cache_empty(cache)) {
+               spin_unlock_irqrestore(&cache->lock, flags);
+               return gnttab_alloc_pages(1, page);
+       }
+-      page[0] = list_first_entry(&cache->pages, struct page, lru);
+-      list_del(&page[0]->lru);
++      page[0] = cache_deq(cache);
+       cache->num_pages--;
+       spin_unlock_irqrestore(&cache->lock, flags);
+@@ -851,7 +903,7 @@ void gnttab_page_cache_put(struct gnttab
+       spin_lock_irqsave(&cache->lock, flags);
+       for (i = 0; i < num; i++)
+-              list_add(&page[i]->lru, &cache->pages);
++              cache_enq(cache, page[i]);
+       cache->num_pages += num;
+       spin_unlock_irqrestore(&cache->lock, flags);
+@@ -867,8 +919,7 @@ void gnttab_page_cache_shrink(struct gnt
+       spin_lock_irqsave(&cache->lock, flags);
+       while (cache->num_pages > num) {
+-              page[i] = list_first_entry(&cache->pages, struct page, lru);
+-              list_del(&page[i]->lru);
++              page[i] = cache_deq(cache);
+               cache->num_pages--;
+               if (++i == ARRAY_SIZE(page)) {
+                       spin_unlock_irqrestore(&cache->lock, flags);
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -12,7 +12,7 @@
+ #include <xen/xen.h>
+ static DEFINE_MUTEX(list_lock);
+-static LIST_HEAD(page_list);
++static struct page *page_list;
+ static unsigned int list_count;
+ static int fill_list(unsigned int nr_pages)
+@@ -75,7 +75,8 @@ static int fill_list(unsigned int nr_pag
+               struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+               BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+-              list_add(&pg->lru, &page_list);
++              pg->zone_device_data = page_list;
++              page_list = pg;
+               list_count++;
+       }
+@@ -101,12 +102,10 @@ int xen_alloc_unpopulated_pages(unsigned
+       }
+       for (i = 0; i < nr_pages; i++) {
+-              struct page *pg = list_first_entry_or_null(&page_list,
+-                                                         struct page,
+-                                                         lru);
++              struct page *pg = page_list;
+               BUG_ON(!pg);
+-              list_del(&pg->lru);
++              page_list = pg->zone_device_data;
+               list_count--;
+               pages[i] = pg;
+@@ -117,7 +116,8 @@ int xen_alloc_unpopulated_pages(unsigned
+                               unsigned int j;
+                               for (j = 0; j <= i; j++) {
+-                                      list_add(&pages[j]->lru, &page_list);
++                                      pages[j]->zone_device_data = page_list;
++                                      page_list = pages[j];
+                                       list_count++;
+                               }
+                               goto out;
+@@ -143,7 +143,8 @@ void xen_free_unpopulated_pages(unsigned
+       mutex_lock(&list_lock);
+       for (i = 0; i < nr_pages; i++) {
+-              list_add(&pages[i]->lru, &page_list);
++              pages[i]->zone_device_data = page_list;
++              page_list = pages[i];
+               list_count++;
+       }
+       mutex_unlock(&list_lock);
+@@ -172,7 +173,8 @@ static int __init init(void)
+                       struct page *pg =
+                               pfn_to_page(xen_extra_mem[i].start_pfn + j);
+-                      list_add(&pg->lru, &page_list);
++                      pg->zone_device_data = page_list;
++                      page_list = pg;
+                       list_count++;
+               }
+       }
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -200,7 +200,11 @@ void gnttab_free_pages(int nr_pages, str
+ struct gnttab_page_cache {
+       spinlock_t              lock;
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++      struct page             *pages;
++#else
+       struct list_head        pages;
++#endif
+       unsigned int            num_pages;
+ };
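The replacement pattern in isolation, as a stand-alone sketch (invented
object type; the next field plays the role of page->zone_device_data): a
LIFO threaded through a single pointer field, so no list_head/lru is needed.

  #include <stdio.h>

  struct obj {
          int id;
          struct obj *next;       /* plays the role of page->zone_device_data */
  };

  static struct obj *cache;       /* head of the LIFO, like the patched page_list */

  static void cache_push(struct obj *o)
  {
          o->next = cache;
          cache = o;
  }

  static struct obj *cache_pop(void)
  {
          struct obj *o = cache;

          if (o)
                  cache = o->next;
          return o;
  }

  int main(void)
  {
          struct obj a = { .id = 1 }, b = { .id = 2 };
          struct obj *first, *second;

          cache_push(&a);
          cache_push(&b);
          first = cache_pop();
          second = cache_pop();
          printf("%d %d\n", first->id, second->id);       /* prints "2 1" */
          return 0;
  }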
diff --git a/queue-5.9/zonefs-fix-page-reference-and-bio-leak.patch b/queue-5.9/zonefs-fix-page-reference-and-bio-leak.patch
new file mode 100644 (file)
index 0000000..340ee69
--- /dev/null
@@ -0,0 +1,67 @@
+From 6bea0225a4bf14a58af71cb9677a756921469e46 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Wed, 9 Dec 2020 20:16:10 +0900
+Subject: zonefs: fix page reference and BIO leak
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 6bea0225a4bf14a58af71cb9677a756921469e46 upstream.
+
+In zonefs_file_dio_append(), the pages obtained using
+bio_iov_iter_get_pages() are not released on completion of the
+REQ_OP_APPEND BIO, nor when bio_iov_iter_get_pages() fails.
+Furthermore, a call to bio_put() is missing when
+bio_iov_iter_get_pages() fails.
+
+Fix these resource leaks by adding BIO resource release code (bio_put()
+and bio_release_pages()) at the end of the function after the BIO
+execution, and by adding a jump to this resource cleanup code in case of
+bio_iov_iter_get_pages() failure.
+
+While at it, also fix the call to task_io_account_write() to be passed
+the correct BIO size instead of bio_iov_iter_get_pages() return value.
+
+Reported-by: Christoph Hellwig <hch@lst.de>
+Fixes: 02ef12a663c7 ("zonefs: use REQ_OP_ZONE_APPEND for sync DIO")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/zonefs/super.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -628,21 +628,23 @@ static ssize_t zonefs_file_dio_append(st
+               bio->bi_opf |= REQ_FUA;
+       ret = bio_iov_iter_get_pages(bio, from);
+-      if (unlikely(ret)) {
+-              bio_io_error(bio);
+-              return ret;
+-      }
++      if (unlikely(ret))
++              goto out_release;
++
+       size = bio->bi_iter.bi_size;
+-      task_io_account_write(ret);
++      task_io_account_write(size);
+       if (iocb->ki_flags & IOCB_HIPRI)
+               bio_set_polled(bio, iocb);
+       ret = submit_bio_wait(bio);
++      zonefs_file_write_dio_end_io(iocb, size, ret, 0);
++
++out_release:
++      bio_release_pages(bio, false);
+       bio_put(bio);
+-      zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+       if (ret >= 0) {
+               iocb->ki_pos += size;
+               return size;