--- /dev/null
+From fccd0f7cf4d532674d727c7f204f038456675dee Mon Sep 17 00:00:00 2001
+From: Colin Xu <colin.xu@intel.com>
+Date: Mon, 1 Jun 2020 11:06:38 +0800
+Subject: drm/i915/gvt: Fix two CFL MMIO handling caused by regression.
+
+From: Colin Xu <colin.xu@intel.com>
+
+commit fccd0f7cf4d532674d727c7f204f038456675dee upstream.
+
+D_CFL was incorrectly removed for:
+GAMT_CHKN_BIT_REG
+GEN9_CTX_PREEMPT_REG
+
+V2: Update commit message.
+V3: Rebase and split Fixes and mis-handled MMIO.
+
+Fixes: 43226e6fe798 ("drm/i915/gvt: replaced register address with name")
+Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Colin Xu <colin.xu@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20200601030638.16002-1-colin.xu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gvt/handlers.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/handlers.c
++++ b/drivers/gpu/drm/i915/gvt/handlers.c
+@@ -3131,8 +3131,8 @@ static int init_skl_mmio_info(struct int
+ MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+
+- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+- MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
++ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
++ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
+
+ return 0;
+ }
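The D_* tokens in the hunk above are device-scope masks: an MMIO handler is
only registered for platforms whose bit is present in its mask, so dropping
D_CFL silently left Coffee Lake without these two handlers. A minimal sketch
of that gating, assuming simplified mask values (the real definitions in the
gvt headers also cover further platforms):

	/* Illustrative masks only, not the driver's definitions. */
	#define D_SKL		(1 << 0)
	#define D_KBL		(1 << 1)
	#define D_CFL		(1 << 2)
	#define D_SKL_PLUS	(D_SKL | D_KBL | D_CFL)	/* SKL and newer */

	/* A handler whose mask lacks the running platform's bit is never
	 * matched; with D_KBL alone, CFL accesses find no handler. */
	static inline bool mmio_scope_matches(unsigned int platform_bit,
					      unsigned int handler_mask)
	{
		return (platform_bit & handler_mask) != 0;
	}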
--- /dev/null
+From 9486727f5981a5ec5c0b699fb1777451bd6786e4 Mon Sep 17 00:00:00 2001
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Tue, 23 Jun 2020 07:13:40 +0800
+Subject: iommu/vt-d: Make Intel SVM code 64-bit only
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+commit 9486727f5981a5ec5c0b699fb1777451bd6786e4 upstream.
+
+Current Intel SVM is designed around setting the pgd_t of the processor
+page table in the FLPTR field of the PASID entry. First-level
+translation only supports 4- and 5-level paging structures, hence it's
+infeasible for the IOMMU to share a processor's page table when it's
+running in 32-bit mode. Let's disable 32-bit support for now and claim
+support only when all the missing pieces are ready in the future.
+
+Fixes: 1c4f88b7f1f92 ("iommu/vt-d: Shared virtual address in scalable mode")
+Suggested-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Link: https://lore.kernel.org/r/20200622231345.29722-2-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
+
+ config INTEL_IOMMU_SVM
+ bool "Support for Shared Virtual Memory with Intel IOMMU"
+- depends on INTEL_IOMMU && X86
++ depends on INTEL_IOMMU && X86_64
+ select PCI_PASID
+ select PCI_PRI
+ select MMU_NOTIFIER
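The limitation is one of paging formats: a processor running 32-bit uses
2-level (or 3-level PAE) page tables, while the FLPTR field of a PASID entry
only accepts the 4- and 5-level formats, so there is no page table the two
sides could share. A hedged sketch of the resulting gate; svm_paging_compatible()
is a hypothetical helper written for illustration, not driver code:

	/* Hypothetical helper: first-level sharing is only coherent when
	 * the CPU uses 4- or 5-level paging, i.e. a 64-bit x86 kernel. */
	static inline bool svm_paging_compatible(void)
	{
		return IS_ENABLED(CONFIG_X86_64);
	}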
--- /dev/null
+From ef7232da6bcd4294cbb2d424bc35885721570f01 Mon Sep 17 00:00:00 2001
+From: Shannon Nelson <snelson@pensando.io>
+Date: Tue, 16 Jun 2020 08:06:26 -0700
+Subject: ionic: export features for vlans to use
+
+From: Shannon Nelson <snelson@pensando.io>
+
+commit ef7232da6bcd4294cbb2d424bc35885721570f01 upstream.
+
+Set up vlan_features for use by any vlans above us.
+
+Fixes: beead698b173 ("ionic: Add the basic NDO callbacks for netdev support")
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Acked-by: Jonathan Toppins <jtoppins@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -1236,6 +1236,7 @@ static int ionic_init_nic_features(struc
+
+ netdev->hw_features |= netdev->hw_enc_features;
+ netdev->features |= netdev->hw_features;
++ netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT |
+ IFF_LIVE_ADDR_CHANGE;
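The 8021q layer builds a VLAN device's feature set from the lower device's
vlan_features, so a driver that leaves the field empty strips checksum and
TSO offloads from every VLAN stacked on top of it. A simplified sketch of
that inheritance, loosely modeled on vlan_dev_init() (not the exact
upstream code):

	/* Sketch: how a VLAN device picks up features from its real
	 * device; with vlan_features left at zero, the VLAN gets none. */
	static void vlan_inherit_features_sketch(struct net_device *vlan_dev,
						 struct net_device *real_dev)
	{
		vlan_dev->features = real_dev->vlan_features;
		vlan_dev->vlan_features = real_dev->vlan_features;
	}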
--- /dev/null
+From 3103b6feb4454646558eedc50ece728bc469f341 Mon Sep 17 00:00:00 2001
+From: Shannon Nelson <snelson@pensando.io>
+Date: Mon, 15 Jun 2020 18:14:59 -0700
+Subject: ionic: no link check while resetting queues
+
+From: Shannon Nelson <snelson@pensando.io>
+
+commit 3103b6feb4454646558eedc50ece728bc469f341 upstream.
+
+If the driver is busy resetting queues after a change in
+MTU or queue parameters, don't bother checking the link;
+wait until the next watchdog cycle.
+
+Fixes: 987c0871e8ae ("ionic: check for linkup in watchdog")
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Acked-by: Jonathan Toppins <jtoppins@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -85,7 +85,8 @@ static void ionic_link_status_check(stru
+ u16 link_status;
+ bool link_up;
+
+- if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
++ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
++ test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
+ return;
+
+ if (lif->ionic->is_mgmt_nic)
--- /dev/null
+From 2f3fead62144002557f322c2a7c15e1255df0653 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Tue, 9 Jun 2020 11:57:56 +0200
+Subject: libceph: don't omit recovery_deletes in target_copy()
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 2f3fead62144002557f322c2a7c15e1255df0653 upstream.
+
+Currently target_copy() is used only for sending linger pings, so
+this doesn't come up, but generally omitting recovery_deletes can
+result in unneeded resends (force_resend in calc_target()).
+
+Fixes: ae78dd8139ce ("libceph: make RECOVERY_DELETES feature create a new interval")
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ceph/osd_client.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -445,6 +445,7 @@ static void target_copy(struct ceph_osd_
+ dest->size = src->size;
+ dest->min_size = src->min_size;
+ dest->sort_bitwise = src->sort_bitwise;
++ dest->recovery_deletes = src->recovery_deletes;
+
+ dest->flags = src->flags;
+ dest->paused = src->paused;
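The hazard fixed here is a generic one: a field-by-field copy helper silently
goes stale when the struct gains a field that other code compares. A reduced
sketch of the failure mode, using illustrative types rather than the ceph
structs:

	/* Illustrative only: had recovery_deletes been left out of the
	 * copy, a later interval comparison (as in calc_target()) would
	 * see a spurious difference and force a resend. */
	struct tgt_sketch {
		int size;
		int min_size;
		bool sort_bitwise;
		bool recovery_deletes;
	};

	static void tgt_copy_sketch(struct tgt_sketch *dst,
				    const struct tgt_sketch *src)
	{
		dst->size = src->size;
		dst->min_size = src->min_size;
		dst->sort_bitwise = src->sort_bitwise;
		dst->recovery_deletes = src->recovery_deletes; /* the once-missing copy */
	}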
--- /dev/null
+From 7f70c2a68a51496289df163f6969d4db7c383f30 Mon Sep 17 00:00:00 2001
+From: Arjun Roy <arjunroy@google.com>
+Date: Thu, 25 Jun 2020 20:30:01 -0700
+Subject: mm/memory.c: properly pte_offset_map_lock/unlock in vm_insert_pages()
+
+From: Arjun Roy <arjunroy@google.com>
+
+commit 7f70c2a68a51496289df163f6969d4db7c383f30 upstream.
+
+Calls to pte_offset_map() in vm_insert_pages() are erroneously not
+matched with a call to pte_unmap(). This would cause problems on
+architectures where that is not a no-op.
+
+This patch does away with the non-traditional locking in the existing
+code, and instead uses pte_offset_map_lock/unlock() as usual,
+incrementing the PTE pointer as necessary. The PTE pointer is kept
+within bounds because we clamp it with PTRS_PER_PTE.
+
+Link: http://lkml.kernel.org/r/20200618220446.20284-1-arjunroy.kdev@gmail.com
+Fixes: 8cd3984d81d5 ("mm/memory.c: add vm_insert_pages()")
+Signed-off-by: Arjun Roy <arjunroy@google.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1501,7 +1501,7 @@ out:
+ }
+
+ #ifdef pte_index
+-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
++static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
+ unsigned long addr, struct page *page, pgprot_t prot)
+ {
+ int err;
+@@ -1509,8 +1509,9 @@ static int insert_page_in_batch_locked(s
+ if (!page_count(page))
+ return -EINVAL;
+ err = validate_page_before_insert(page);
+- return err ? err : insert_page_into_pte_locked(
+- mm, pte_offset_map(pmd, addr), addr, page, prot);
++ if (err)
++ return err;
++ return insert_page_into_pte_locked(mm, pte, addr, page, prot);
+ }
+
+ /* insert_pages() amortizes the cost of spinlock operations
+@@ -1520,7 +1521,8 @@ static int insert_pages(struct vm_area_s
+ struct page **pages, unsigned long *num, pgprot_t prot)
+ {
+ pmd_t *pmd = NULL;
+- spinlock_t *pte_lock = NULL;
++ pte_t *start_pte, *pte;
++ spinlock_t *pte_lock;
+ struct mm_struct *const mm = vma->vm_mm;
+ unsigned long curr_page_idx = 0;
+ unsigned long remaining_pages_total = *num;
+@@ -1539,18 +1541,17 @@ more:
+ ret = -ENOMEM;
+ if (pte_alloc(mm, pmd))
+ goto out;
+- pte_lock = pte_lockptr(mm, pmd);
+
+ while (pages_to_write_in_pmd) {
+ int pte_idx = 0;
+ const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
+
+- spin_lock(pte_lock);
+- for (; pte_idx < batch_size; ++pte_idx) {
+- int err = insert_page_in_batch_locked(mm, pmd,
++ start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
++ for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
++ int err = insert_page_in_batch_locked(mm, pte,
+ addr, pages[curr_page_idx], prot);
+ if (unlikely(err)) {
+- spin_unlock(pte_lock);
++ pte_unmap_unlock(start_pte, pte_lock);
+ ret = err;
+ remaining_pages_total -= pte_idx;
+ goto out;
+@@ -1558,7 +1559,7 @@ more:
+ addr += PAGE_SIZE;
+ ++curr_page_idx;
+ }
+- spin_unlock(pte_lock);
++ pte_unmap_unlock(start_pte, pte_lock);
+ pages_to_write_in_pmd -= batch_size;
+ remaining_pages_total -= batch_size;
+ }
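The pairing the patch restores is the usual PTE walking discipline; a minimal
sketch of pte_offset_map_lock()/pte_unmap_unlock() in isolation (illustrative
function, error handling elided):

	/* Every pte_offset_map_lock() must be undone with
	 * pte_unmap_unlock() on the pointer originally returned,
	 * not on an incremented copy of it. */
	static void pte_batch_sketch(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, int nr)
	{
		spinlock_t *ptl;
		pte_t *start_pte, *pte;
		int i;

		start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		for (pte = start_pte, i = 0; i < nr; ++pte, ++i, addr += PAGE_SIZE)
			; /* operate on *pte here */
		pte_unmap_unlock(start_pte, ptl);
	}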
--- /dev/null
+From 0e2c09011d4de4161f615ff860a605a9186cf62a Mon Sep 17 00:00:00 2001
+From: Atish Patra <atish.patra@wdc.com>
+Date: Wed, 17 Jun 2020 13:37:32 -0700
+Subject: RISC-V: Acquire mmap lock before invoking walk_page_range
+
+From: Atish Patra <atish.patra@wdc.com>
+
+commit 0e2c09011d4de4161f615ff860a605a9186cf62a upstream.
+
+As per the walk_page_range() documentation, the mmap lock should be
+acquired by the caller before invoking walk_page_range().
+mmap_assert_locked() gets triggered without that. The details can be
+found here:
+
+http://lists.infradead.org/pipermail/linux-riscv/2020-June/010335.html
+
+Fixes: 395a21ff859c ("riscv: add ARCH_HAS_SET_DIRECT_MAP support")
+Signed-off-by: Atish Patra <atish.patra@wdc.com>
+Reviewed-by: Michel Lespinasse <walken@google.com>
+Reviewed-by: Zong Li <zong.li@sifive.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/riscv/mm/pageattr.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -151,6 +151,7 @@ int set_memory_nx(unsigned long addr, in
+
+ int set_direct_map_invalid_noflush(struct page *page)
+ {
++ int ret;
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long end = start + PAGE_SIZE;
+ struct pageattr_masks masks = {
+@@ -158,11 +159,16 @@ int set_direct_map_invalid_noflush(struc
+ .clear_mask = __pgprot(_PAGE_PRESENT)
+ };
+
+- return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
++ mmap_read_lock(&init_mm);
++ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
++ mmap_read_unlock(&init_mm);
++
++ return ret;
+ }
+
+ int set_direct_map_default_noflush(struct page *page)
+ {
++ int ret;
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long end = start + PAGE_SIZE;
+ struct pageattr_masks masks = {
+@@ -170,7 +176,11 @@ int set_direct_map_default_noflush(struc
+ .clear_mask = __pgprot(0)
+ };
+
+- return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
++ mmap_read_lock(&init_mm);
++ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
++ mmap_read_unlock(&init_mm);
++
++ return ret;
+ }
+
+ void __kernel_map_pages(struct page *page, int numpages, int enable)
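The assertion the commit message refers to sits inside the walker itself;
roughly the following, simplified from mm/pagewalk.c (signatures and details
vary by kernel version):

	/* Simplified sketch of why the splat fires: walk_page_range()
	 * asserts the mmap lock before touching any page tables. */
	static int walk_page_range_sketch(struct mm_struct *mm,
					  unsigned long start, unsigned long end)
	{
		mmap_assert_locked(mm);	/* triggers if the caller forgot the lock */
		/* ... walk page tables over [start, end) ... */
		return 0;
	}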
--- /dev/null
+From aadf9dcef9d4cd68c73a4ab934f93319c4becc47 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Wed, 17 Jun 2020 22:50:33 +0100
+Subject: rxrpc: Fix trace string
+
+From: David Howells <dhowells@redhat.com>
+
+commit aadf9dcef9d4cd68c73a4ab934f93319c4becc47 upstream.
+
+The trace symbol printer (__print_symbolic()) ignores symbols that map to
+an empty string and prints the hex value instead.
+
+Fix the symbol for rxrpc_cong_no_change to be " -" instead of "" to
+avoid this.
+
+Fixes: b54a134a7de4 ("rxrpc: Fix handling of enums-to-string translation in tracing")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/trace/events/rxrpc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -400,7 +400,7 @@ enum rxrpc_tx_point {
+ EM(rxrpc_cong_begin_retransmission, " Retrans") \
+ EM(rxrpc_cong_cleared_nacks, " Cleared") \
+ EM(rxrpc_cong_new_low_nack, " NewLowN") \
+- EM(rxrpc_cong_no_change, "") \
++ EM(rxrpc_cong_no_change, " -") \
+ EM(rxrpc_cong_progress, " Progres") \
+ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
+ EM(rxrpc_cong_rtt_window_end, " RttWinE") \
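For reference, __print_symbolic() resolves a value against a table of
{value, string} pairs and treats an empty string as no mapping at all,
falling back to a hex print. A hedged sketch of an equivalent lookup
(plain C, not the tracing macro internals):

	struct sym_sketch {
		unsigned long val;
		const char *str;
	};

	/* Sketch of the behavior relied on here: empty strings are
	 * skipped, so the value would print as hex instead of blank. */
	static const char *print_symbolic_sketch(unsigned long val,
						 const struct sym_sketch *tbl,
						 int n, char *hexbuf)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].val == val && tbl[i].str[0] != '\0')
				return tbl[i].str;
		sprintf(hexbuf, "0x%lx", val);	/* no non-empty match: raw hex */
		return hexbuf;
	}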
drm-i915-move-cec_notifier-to-intel_hdmi_connector_unregister-v2.patch
drm-i915-gt-ignore-irq-enabling-on-the-virtual-engines.patch
drm-i915-gt-only-swap-to-a-random-sibling-once-upon-creation.patch
+libceph-don-t-omit-recovery_deletes-in-target_copy.patch
+risc-v-acquire-mmap-lock-before-invoking-walk_page_range.patch
+rxrpc-fix-trace-string.patch
+spi-sprd-switch-the-sequence-of-setting-wdg_load_low-and-_high.patch
+ionic-no-link-check-while-resetting-queues.patch
+ionic-export-features-for-vlans-to-use.patch
+iommu-vt-d-make-intel-svm-code-64-bit-only.patch
+mm-memory.c-properly-pte_offset_map_lock-unlock-in-vm_insert_pages.patch
+drm-i915-gvt-fix-two-cfl-mmio-handling-caused-by-regression.patch
--- /dev/null
+From 8bdd79dae1ff5397351b95e249abcae126572617 Mon Sep 17 00:00:00 2001
+From: Lingling Xu <ling_ling.xu@unisoc.com>
+Date: Tue, 2 Jun 2020 16:24:15 +0800
+Subject: spi: sprd: switch the sequence of setting WDG_LOAD_LOW and _HIGH
+
+From: Lingling Xu <ling_ling.xu@unisoc.com>
+
+commit 8bdd79dae1ff5397351b95e249abcae126572617 upstream.
+
+The watchdog counter consists of WDG_LOAD_LOW and WDG_LOAD_HIGH, both
+of which are loaded into the watchdog counter as soon as WDG_LOAD_LOW
+is written, so WDG_LOAD_HIGH must be written first.
+
+Fixes: ac1775012058 ("spi: sprd: Add the support of restarting the system")
+Signed-off-by: Lingling Xu <ling_ling.xu@unisoc.com>
+Signed-off-by: Chunyan Zhang <chunyan.zhang@unisoc.com>
+Link: https://lore.kernel.org/r/20200602082415.5848-1-zhang.lyra@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-sprd-adi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-sprd-adi.c
++++ b/drivers/spi/spi-sprd-adi.c
+@@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(stru
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+
+ /* Load the watchdog timeout value, 50ms is always enough. */
++ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
+ WDG_LOAD_VAL & WDG_LOAD_MASK);
+- sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
+
+ /* Start the watchdog to reset system */
+ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);