--- /dev/null
+From 9bbd77d5bbc9aff8cb74d805c31751f5f0691ba8 Mon Sep 17 00:00:00 2001
+From: Benjamin Valentin <benpicco@googlemail.com>
+Date: Thu, 21 Jan 2021 19:24:17 -0800
+Subject: Input: xpad - sync supported devices with fork on GitHub
+
+From: Benjamin Valentin <benpicco@googlemail.com>
+
+commit 9bbd77d5bbc9aff8cb74d805c31751f5f0691ba8 upstream.
+
+There is a fork of this driver on GitHub [0] that has been updated
+with new device IDs.
+
+Merge those into the mainline driver, so that users of those devices
+no longer need the out-of-tree fork.
+
+[0] https://github.com/paroj/xpad
+
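+For reference, supporting a device means touching two tables in xpad.c:
+the xpad_device table maps each VID:PID to a name and pad type, while
+the xpad_table match list tells the USB core which interfaces to probe
+at all. The XPAD_*_VENDOR() macros used there expand to usb_device_id
+matches per gamepad interface, roughly like this sketch (the exact
+subclass/protocol values are from memory, check xpad.c):
+
+	/* Xbox One pads: vendor-specific class, fixed interface info */
+	#define XPAD_XBOXONE_VENDOR(vend) \
+		{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | \
+				 USB_DEVICE_ID_MATCH_INT_INFO, \
+		  .idVendor = (vend), \
+		  .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
+		  .bInterfaceSubClass = 71, \
+		  .bInterfaceProtocol = 208 }
+
+Hence the diff updates both the device table and the vendor match
+entries.
+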
+Signed-off-by: Benjamin Valentin <benpicco@googlemail.com>
+Link: https://lore.kernel.org/r/20210121142523.1b6b050f@rechenknecht2k11
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/input/joystick/xpad.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -215,9 +215,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -296,6 +304,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_t
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
--- /dev/null
+From 29b32839725f8c89a41cb6ee054c85f3116ea8b5 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Wed, 27 Jan 2021 09:53:17 -0800
+Subject: iommu/vt-d: Do not use flush-queue when caching-mode is on
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 29b32839725f8c89a41cb6ee054c85f3116ea8b5 upstream.
+
+When an Intel IOMMU is virtualized and a physical device is passed
+through to the VM, changes to the virtual IOMMU need to be propagated
+to the physical IOMMU. The hypervisor therefore needs to monitor PTE
+mappings in the IOMMU page-tables. The Intel specifications provide a
+"caching-mode" capability that a virtual IOMMU uses to report that the
+IOMMU is virtualized and that a TLB flush is needed after each mapping,
+which allows the hypervisor to propagate virtual IOMMU mappings to the
+physical IOMMU. To the best of my knowledge, no real physical IOMMU
+reports "caching-mode" as turned on.
+
+Synchronizing the virtual and the physical IOMMU tables is expensive if
+the hypervisor is unaware which PTEs have changed, as the hypervisor is
+required to walk all the virtualized tables and look for changes.
+Consequently, domain flushes are much more expensive than page-specific
+flushes on virtualized IOMMUs with passthrough devices. The kernel
+therefore exploited the "caching-mode" indication to avoid domain
+flushing and use page-specific flushing in virtualized environments. See
+commit 78d5f0f500e6 ("intel-iommu: Avoid global flushes with caching
+mode.")
+
+This behavior changed after commit 13cf01744608 ("iommu/vt-d: Make use
+of iova deferred flushing"). Now, when batched TLB flushing is used (the
+default), full TLB domain flushes are performed frequently, requiring
+the hypervisor to perform expensive synchronization between the virtual
+TLB and the physical one.
+
+Getting batched TLB flushes to use page-specific invalidations again in
+such circumstances is not easy, since the TLB invalidation scheme
+assumes that "full" domain TLB flushes are performed for scalability.
+
+Disable batched TLB flushes when caching-mode is on, as the performance
+benefit from using batched TLB invalidations is likely to be much
+smaller than the overhead of the virtual-to-physical IOMMU page-tables
+synchronization.
+
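+For reference, "caching-mode" is a single bit in the VT-d Capability
+Register; the driver tests it with the cap_caching_mode() helper used
+in the hunk below, which is roughly (per include/linux/intel-iommu.h):
+
+	/* Caching Mode (CM): bit 7 of the Capability Register */
+	#define cap_caching_mode(c)	(((c) >> 7) & 1)
+
+so the added check only tests the iommu->cap value already cached at
+probe time.
+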
+Fixes: 13cf01744608 ("iommu/vt-d: Make use of iova deferred flushing")
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: stable@vger.kernel.org
+Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20210127175317.1600473-1-namit@vmware.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3285,6 +3285,12 @@ static int __init init_dmars(void)
+
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
++
++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
++ pr_info("Disable batched IOTLB flush due to virtualization");
++ intel_iommu_strict = 1;
++ }
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_supported(iommu))
+ intel_svm_init(iommu);
--- /dev/null
+From 64f55156f7adedb1ac5bb9cdbcbc9ac05ff5a724 Mon Sep 17 00:00:00 2001
+From: Luca Coelho <luciano.coelho@intel.com>
+Date: Thu, 8 Oct 2020 18:09:43 +0300
+Subject: iwlwifi: mvm: don't send RFH_QUEUE_CONFIG_CMD with no queues
+
+From: Luca Coelho <luciano.coelho@intel.com>
+
+commit 64f55156f7adedb1ac5bb9cdbcbc9ac05ff5a724 upstream.
+
+If we have only a single RX queue, such as when MSI-X is not
+available, we should not send the RFH_QUEUE_CONFIG_CMD, because our
+only queue is the same as the command queue and will be configured as
+part of the context info. Our code was actually trying to send the
+command with 0 queues, which caused UMAC assert 0x1D04.
+
+Fix that by not sending the command when we have a single queue.
+
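+For reference, in the single-queue case the command below would be
+built with num_queues = mvm->trans->num_rx_queues - 1 == 0, so
+struct_size() sizes it as a bare header with an empty flexible array,
+roughly:
+
+	/* struct_size(cmd, data, n) == sizeof(*cmd) + n * sizeof(cmd->data[0]) */
+	size = struct_size(cmd, data, 0);	/* header only, no queue entries */
+
+which is exactly the zero-queue command that tripped UMAC assert
+0x1D04.
+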
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/iwlwifi.20201008180656.c35eeb3299f8.I08f79a6ebe150a7d180b7005b24504bfdba6d8b5@changeid
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+- /* Do not configure default queue, it is configured via context info */
++ /*
++ * The default queue is configured via context info, so if we
++ * have a single queue, there's nothing to do here.
++ */
++ if (mvm->trans->num_rx_queues == 1)
++ return 0;
++
++ /* skip the default queue */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = struct_size(cmd, data, num_queues);
--- /dev/null
+From dc5d17a3c39b06aef866afca19245a9cfb533a79 Mon Sep 17 00:00:00 2001
+From: Xiao Ni <xni@redhat.com>
+Date: Thu, 10 Dec 2020 14:33:32 +0800
+Subject: md: Set prev_flush_start and flush_bio in an atomic way
+
+From: Xiao Ni <xni@redhat.com>
+
+commit dc5d17a3c39b06aef866afca19245a9cfb533a79 upstream.
+
+One customer reported a crash caused by a flush request. It triggers
+a warning before the crash.
+
+ /* new request after previous flush is completed */
+ if (ktime_after(req_start, mddev->prev_flush_start)) {
+ WARN_ON(mddev->flush_bio);
+ mddev->flush_bio = bio;
+ bio = NULL;
+ }
+
+The WARN_ON is triggered. We use a spin lock to protect prev_flush_start
+and flush_bio in md_flush_request, but there is no lock protection in
+md_submit_flush_data, so the compiler is free to reorder the write
+instructions there and set flush_bio to NULL before updating
+prev_flush_start.
+
+For example, flush bio1 sets flush_bio to NULL first in
+md_submit_flush_data. An interrupt, or an extended stall caused by the
+VMware hypervisor, then occurs between the updates of flush_bio and
+prev_flush_start. Because flush_bio is NULL, flush bio2 can take the
+lock and submit to the underlying disks, while flush bio1 only updates
+prev_flush_start after the interrupt or stall ends.
+
+Then flush bio3 enters md_flush_request. Its start time req_start is
+after prev_flush_start, and flush_bio is not NULL (flush bio2 hasn't
+finished), so the WARN_ON triggers and INIT_WORK is called again.
+INIT_WORK() re-initializes the list pointers in the work_struct, which
+can corrupt the work list and queue the work_struct a second time. With
+the work list corrupted, invalid work items may be used, causing a
+crash in process_one_work.
+
+We need to make sure only one flush bio can be handled at a time, so
+add a spin lock in md_submit_flush_data to update prev_flush_start and
+flush_bio atomically.
+
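+In miniature, the hazard is two plain stores that may be reordered (a
+sketch, not the driver code; this tree names the field last_flush
+where the upstream message says prev_flush_start):
+
+	mddev->last_flush = mddev->start_flush;	/* may be observed ... */
+	mddev->flush_bio = NULL;		/* ... after this store */
+
+Taking mddev->lock around both stores pairs with md_flush_request(),
+which already reads and updates these fields under the same lock, so
+the two writes become one atomic update from its point of view.
+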
+Reviewed-by: David Jeffery <djeffery@redhat.com>
+Signed-off-by: Xiao Ni <xni@redhat.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -538,8 +538,10 @@ static void md_submit_flush_data(struct
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
++ spin_lock_irq(&mddev->lock);
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
++ spin_unlock_irq(&mddev->lock);
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
mm-thp-fix-madv_remove-deadlock-on-shmem-thp.patch
x86-build-disable-cet-instrumentation-in-the-kernel.patch
x86-apic-add-extra-serialization-for-non-serializing-msrs.patch
+iwlwifi-mvm-don-t-send-rfh_queue_config_cmd-with-no-queues.patch
+input-xpad-sync-supported-devices-with-fork-on-github.patch
+iommu-vt-d-do-not-use-flush-queue-when-caching-mode-is-on.patch
+md-set-prev_flush_start-and-flush_bio-in-an-atomic-way.patch