]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 12:53:22 +0000 (14:53 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Aug 2020 12:53:22 +0000 (14:53 +0200)
added patches:
arm-8992-1-fix-unwind_frame-for-clang-built-kernels.patch
erofs-fix-extended-inode-could-cross-boundary.patch
include-asm-generic-vmlinux.lds.h-align-ro_after_init.patch
irqdomain-treewide-free-firmware-node-after-domain-removal.patch
mtd-rawnand-qcom-avoid-write-to-unavailable-register.patch
parisc-do-not-use-an-ordered-store-in-pa_tlb_lock.patch
parisc-implement-__smp_store_release-and-__smp_load_acquire-barriers.patch
parisc-mask-out-enable-and-reserved-bits-from-sba-imask.patch
revert-parisc-drop-ldcw-barrier-in-cas-code-when-running-up.patch
revert-parisc-revert-release-spinlocks-using-ordered-store.patch
revert-parisc-use-ldcw-instruction-for-smp-spinlock-release-barrier.patch
spi-spidev-align-buffers-for-dma.patch

13 files changed:
queue-5.4/arm-8992-1-fix-unwind_frame-for-clang-built-kernels.patch [new file with mode: 0644]
queue-5.4/erofs-fix-extended-inode-could-cross-boundary.patch [new file with mode: 0644]
queue-5.4/include-asm-generic-vmlinux.lds.h-align-ro_after_init.patch [new file with mode: 0644]
queue-5.4/irqdomain-treewide-free-firmware-node-after-domain-removal.patch [new file with mode: 0644]
queue-5.4/mtd-rawnand-qcom-avoid-write-to-unavailable-register.patch [new file with mode: 0644]
queue-5.4/parisc-do-not-use-an-ordered-store-in-pa_tlb_lock.patch [new file with mode: 0644]
queue-5.4/parisc-implement-__smp_store_release-and-__smp_load_acquire-barriers.patch [new file with mode: 0644]
queue-5.4/parisc-mask-out-enable-and-reserved-bits-from-sba-imask.patch [new file with mode: 0644]
queue-5.4/revert-parisc-drop-ldcw-barrier-in-cas-code-when-running-up.patch [new file with mode: 0644]
queue-5.4/revert-parisc-revert-release-spinlocks-using-ordered-store.patch [new file with mode: 0644]
queue-5.4/revert-parisc-use-ldcw-instruction-for-smp-spinlock-release-barrier.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/spi-spidev-align-buffers-for-dma.patch [new file with mode: 0644]

diff --git a/queue-5.4/arm-8992-1-fix-unwind_frame-for-clang-built-kernels.patch b/queue-5.4/arm-8992-1-fix-unwind_frame-for-clang-built-kernels.patch
new file mode 100644 (file)
index 0000000..05f2b30
--- /dev/null
@@ -0,0 +1,80 @@
+From b4d5ec9b39f8b31d98f65bc5577b5d15d93795d7 Mon Sep 17 00:00:00 2001
+From: Nathan Huckleberry <nhuck@google.com>
+Date: Fri, 10 Jul 2020 20:23:37 +0100
+Subject: ARM: 8992/1: Fix unwind_frame for clang-built kernels
+
+From: Nathan Huckleberry <nhuck@google.com>
+
+commit b4d5ec9b39f8b31d98f65bc5577b5d15d93795d7 upstream.
+
+Since clang does not push pc and sp in function prologues, the current
+implementation of unwind_frame does not work. By using the previous
+frame's lr/fp instead of saved pc/sp we get valid unwinds on clang-built
+kernels.
+
+The bounds check on next frame pointer must be changed as well since
+there are 8 less bytes between frames.
+
+This fixes /proc/<pid>/stack.
+
+Link: https://github.com/ClangBuiltLinux/linux/issues/912
+
+Reported-by: Miles Chen <miles.chen@mediatek.com>
+Tested-by: Miles Chen <miles.chen@mediatek.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Nathan Huckleberry <nhuck@google.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/stacktrace.c |   24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -22,6 +22,19 @@
+  * A simple function epilogue looks like this:
+  *    ldm     sp, {fp, sp, pc}
+  *
++ * When compiled with clang, pc and sp are not pushed. A simple function
++ * prologue looks like this when built with clang:
++ *
++ *    stmdb   {..., fp, lr}
++ *    add     fp, sp, #x
++ *    sub     sp, sp, #y
++ *
++ * A simple function epilogue looks like this when built with clang:
++ *
++ *    sub     sp, fp, #x
++ *    ldm     {..., fp, pc}
++ *
++ *
+  * Note that with framepointer enabled, even the leaf functions have the same
+  * prologue and epilogue, therefore we can ignore the LR value in this case.
+  */
+@@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackfra
+       low = frame->sp;
+       high = ALIGN(low, THREAD_SIZE);
++#ifdef CONFIG_CC_IS_CLANG
++      /* check current frame pointer is within bounds */
++      if (fp < low + 4 || fp > high - 4)
++              return -EINVAL;
++
++      frame->sp = frame->fp;
++      frame->fp = *(unsigned long *)(fp);
++      frame->pc = frame->lr;
++      frame->lr = *(unsigned long *)(fp + 4);
++#else
+       /* check current frame pointer is within bounds */
+       if (fp < low + 12 || fp > high - 4)
+               return -EINVAL;
+@@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackfra
+       frame->fp = *(unsigned long *)(fp - 12);
+       frame->sp = *(unsigned long *)(fp - 8);
+       frame->pc = *(unsigned long *)(fp - 4);
++#endif
+       return 0;
+ }
diff --git a/queue-5.4/erofs-fix-extended-inode-could-cross-boundary.patch b/queue-5.4/erofs-fix-extended-inode-could-cross-boundary.patch
new file mode 100644 (file)
index 0000000..2fe1ece
--- /dev/null
@@ -0,0 +1,234 @@
+From 0dcd3c94e02438f4a571690e26f4ee997524102a Mon Sep 17 00:00:00 2001
+From: Gao Xiang <hsiangkao@redhat.com>
+Date: Thu, 30 Jul 2020 01:58:01 +0800
+Subject: erofs: fix extended inode could cross boundary
+
+From: Gao Xiang <hsiangkao@redhat.com>
+
+commit 0dcd3c94e02438f4a571690e26f4ee997524102a upstream.
+
+Each ondisk inode should be aligned with inode slot boundary
+(32-byte alignment) because of nid calculation formula, so all
+compact inodes (32 byte) cannot cross a page boundary. However,
+extended inode is now 64-byte form, which can across page boundary
+in principle if the location is specified on purpose, although
+it's hard to be generated by mkfs due to the allocation policy
+and rarely used by Android use case now mainly for > 4GiB files.
+
+For now, only two fields `i_ctime_nsec` and `i_nlink' couldn't
+be read from disk properly and cause out-of-bound memory read
+with random value.
+
+Let's fix now.
+
+Fixes: 431339ba9042 ("staging: erofs: add inode operations")
+Cc: <stable@vger.kernel.org> # 4.19+
+Link: https://lore.kernel.org/r/20200729175801.GA23973@xiangao.remote.csb
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/erofs/inode.c |  121 +++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 79 insertions(+), 42 deletions(-)
+
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -8,31 +8,80 @@
+ #include <trace/events/erofs.h>
+-/* no locking */
+-static int erofs_read_inode(struct inode *inode, void *data)
++/*
++ * if inode is successfully read, return its inode page (or sometimes
++ * the inode payload page if it's an extended inode) in order to fill
++ * inline data if possible.
++ */
++static struct page *erofs_read_inode(struct inode *inode,
++                                   unsigned int *ofs)
+ {
++      struct super_block *sb = inode->i_sb;
++      struct erofs_sb_info *sbi = EROFS_SB(sb);
+       struct erofs_inode *vi = EROFS_I(inode);
+-      struct erofs_inode_compact *dic = data;
+-      struct erofs_inode_extended *die;
++      const erofs_off_t inode_loc = iloc(sbi, vi->nid);
+-      const unsigned int ifmt = le16_to_cpu(dic->i_format);
+-      struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+-      erofs_blk_t nblks = 0;
++      erofs_blk_t blkaddr, nblks = 0;
++      struct page *page;
++      struct erofs_inode_compact *dic;
++      struct erofs_inode_extended *die, *copied = NULL;
++      unsigned int ifmt;
++      int err;
+-      vi->datalayout = erofs_inode_datalayout(ifmt);
++      blkaddr = erofs_blknr(inode_loc);
++      *ofs = erofs_blkoff(inode_loc);
++      erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
++                __func__, vi->nid, *ofs, blkaddr);
++
++      page = erofs_get_meta_page(sb, blkaddr);
++      if (IS_ERR(page)) {
++              erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
++                        vi->nid, PTR_ERR(page));
++              return page;
++      }
++
++      dic = page_address(page) + *ofs;
++      ifmt = le16_to_cpu(dic->i_format);
++
++      vi->datalayout = erofs_inode_datalayout(ifmt);
+       if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+               erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
+                         vi->datalayout, vi->nid);
+-              DBG_BUGON(1);
+-              return -EOPNOTSUPP;
++              err = -EOPNOTSUPP;
++              goto err_out;
+       }
+       switch (erofs_inode_version(ifmt)) {
+       case EROFS_INODE_LAYOUT_EXTENDED:
+-              die = data;
+-
+               vi->inode_isize = sizeof(struct erofs_inode_extended);
++              /* check if the inode acrosses page boundary */
++              if (*ofs + vi->inode_isize <= PAGE_SIZE) {
++                      *ofs += vi->inode_isize;
++                      die = (struct erofs_inode_extended *)dic;
++              } else {
++                      const unsigned int gotten = PAGE_SIZE - *ofs;
++
++                      copied = kmalloc(vi->inode_isize, GFP_NOFS);
++                      if (!copied) {
++                              err = -ENOMEM;
++                              goto err_out;
++                      }
++                      memcpy(copied, dic, gotten);
++                      unlock_page(page);
++                      put_page(page);
++
++                      page = erofs_get_meta_page(sb, blkaddr + 1);
++                      if (IS_ERR(page)) {
++                              erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
++                                        vi->nid, PTR_ERR(page));
++                              kfree(copied);
++                              return page;
++                      }
++                      *ofs = vi->inode_isize - gotten;
++                      memcpy((u8 *)copied + gotten, page_address(page), *ofs);
++                      die = copied;
++              }
+               vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
+               inode->i_mode = le16_to_cpu(die->i_mode);
+@@ -69,9 +118,12 @@ static int erofs_read_inode(struct inode
+               /* total blocks for compressed files */
+               if (erofs_inode_is_data_compressed(vi->datalayout))
+                       nblks = le32_to_cpu(die->i_u.compressed_blocks);
++
++              kfree(copied);
+               break;
+       case EROFS_INODE_LAYOUT_COMPACT:
+               vi->inode_isize = sizeof(struct erofs_inode_compact);
++              *ofs += vi->inode_isize;
+               vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
+               inode->i_mode = le16_to_cpu(dic->i_mode);
+@@ -111,8 +163,8 @@ static int erofs_read_inode(struct inode
+               erofs_err(inode->i_sb,
+                         "unsupported on-disk inode version %u of nid %llu",
+                         erofs_inode_version(ifmt), vi->nid);
+-              DBG_BUGON(1);
+-              return -EOPNOTSUPP;
++              err = -EOPNOTSUPP;
++              goto err_out;
+       }
+       if (!nblks)
+@@ -120,13 +172,18 @@ static int erofs_read_inode(struct inode
+               inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
+       else
+               inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
+-      return 0;
++      return page;
+ bogusimode:
+       erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
+                 inode->i_mode, vi->nid);
++      err = -EFSCORRUPTED;
++err_out:
+       DBG_BUGON(1);
+-      return -EFSCORRUPTED;
++      kfree(copied);
++      unlock_page(page);
++      put_page(page);
++      return ERR_PTR(err);
+ }
+ static int erofs_fill_symlink(struct inode *inode, void *data,
+@@ -146,7 +203,7 @@ static int erofs_fill_symlink(struct ino
+       if (!lnk)
+               return -ENOMEM;
+-      m_pofs += vi->inode_isize + vi->xattr_isize;
++      m_pofs += vi->xattr_isize;
+       /* inline symlink data shouldn't cross page boundary as well */
+       if (m_pofs + inode->i_size > PAGE_SIZE) {
+               kfree(lnk);
+@@ -167,37 +224,17 @@ static int erofs_fill_symlink(struct ino
+ static int erofs_fill_inode(struct inode *inode, int isdir)
+ {
+-      struct super_block *sb = inode->i_sb;
+       struct erofs_inode *vi = EROFS_I(inode);
+       struct page *page;
+-      void *data;
+-      int err;
+-      erofs_blk_t blkaddr;
+       unsigned int ofs;
+-      erofs_off_t inode_loc;
++      int err = 0;
+       trace_erofs_fill_inode(inode, isdir);
+-      inode_loc = iloc(EROFS_SB(sb), vi->nid);
+-      blkaddr = erofs_blknr(inode_loc);
+-      ofs = erofs_blkoff(inode_loc);
+-
+-      erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
+-                __func__, vi->nid, ofs, blkaddr);
+-      page = erofs_get_meta_page(sb, blkaddr);
+-
+-      if (IS_ERR(page)) {
+-              erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
+-                        vi->nid, PTR_ERR(page));
++      /* read inode base data from disk */
++      page = erofs_read_inode(inode, &ofs);
++      if (IS_ERR(page))
+               return PTR_ERR(page);
+-      }
+-
+-      DBG_BUGON(!PageUptodate(page));
+-      data = page_address(page);
+-
+-      err = erofs_read_inode(inode, data + ofs);
+-      if (err)
+-              goto out_unlock;
+       /* setup the new inode */
+       switch (inode->i_mode & S_IFMT) {
+@@ -210,7 +247,7 @@ static int erofs_fill_inode(struct inode
+               inode->i_fop = &erofs_dir_fops;
+               break;
+       case S_IFLNK:
+-              err = erofs_fill_symlink(inode, data, ofs);
++              err = erofs_fill_symlink(inode, page_address(page), ofs);
+               if (err)
+                       goto out_unlock;
+               inode_nohighmem(inode);
diff --git a/queue-5.4/include-asm-generic-vmlinux.lds.h-align-ro_after_init.patch b/queue-5.4/include-asm-generic-vmlinux.lds.h-align-ro_after_init.patch
new file mode 100644 (file)
index 0000000..c336bd4
--- /dev/null
@@ -0,0 +1,48 @@
+From 7f897acbe5d57995438c831670b7c400e9c0dc00 Mon Sep 17 00:00:00 2001
+From: Romain Naour <romain.naour@gmail.com>
+Date: Fri, 14 Aug 2020 17:31:57 -0700
+Subject: include/asm-generic/vmlinux.lds.h: align ro_after_init
+
+From: Romain Naour <romain.naour@gmail.com>
+
+commit 7f897acbe5d57995438c831670b7c400e9c0dc00 upstream.
+
+Since the patch [1], building the kernel using a toolchain built with
+binutils 2.33.1 prevents booting a sh4 system under Qemu.  Apply the patch
+provided by Alan Modra [2] that fix alignment of rodata.
+
+[1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ebd2263ba9a9124d93bbc0ece63d7e0fae89b40e
+[2] https://www.sourceware.org/ml/binutils/2019-12/msg00112.html
+
+Signed-off-by: Romain Naour <romain.naour@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alan Modra <amodra@gmail.com>
+Cc: Bin Meng <bin.meng@windriver.com>
+Cc: Chen Zhou <chenzhou10@huawei.com>
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Krzysztof Kozlowski <krzk@kernel.org>
+Cc: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Sam Ravnborg <sam@ravnborg.org>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: <stable@vger.kernel.org>
+Link: https://marc.info/?l=linux-sh&m=158429470221261
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/vmlinux.lds.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -340,6 +340,7 @@
+  */
+ #ifndef RO_AFTER_INIT_DATA
+ #define RO_AFTER_INIT_DATA                                            \
++      . = ALIGN(8);                                                   \
+       __start_ro_after_init = .;                                      \
+       *(.data..ro_after_init)                                         \
+       JUMP_TABLE_DATA                                                 \
diff --git a/queue-5.4/irqdomain-treewide-free-firmware-node-after-domain-removal.patch b/queue-5.4/irqdomain-treewide-free-firmware-node-after-domain-removal.patch
new file mode 100644 (file)
index 0000000..afc970d
--- /dev/null
@@ -0,0 +1,125 @@
+From ec0160891e387f4771f953b888b1fe951398e5d9 Mon Sep 17 00:00:00 2001
+From: Jon Derrick <jonathan.derrick@intel.com>
+Date: Tue, 21 Jul 2020 14:26:09 -0600
+Subject: irqdomain/treewide: Free firmware node after domain removal
+
+From: Jon Derrick <jonathan.derrick@intel.com>
+
+commit ec0160891e387f4771f953b888b1fe951398e5d9 upstream.
+
+Commit 711419e504eb ("irqdomain: Add the missing assignment of
+domain->fwnode for named fwnode") unintentionally caused a dangling pointer
+page fault issue on firmware nodes that were freed after IRQ domain
+allocation. Commit e3beca48a45b fixed that dangling pointer issue by only
+freeing the firmware node after an IRQ domain allocation failure. That fix
+no longer frees the firmware node immediately, but leaves the firmware node
+allocated after the domain is removed.
+
+The firmware node must be kept around through irq_domain_remove, but should be
+freed afterwards.
+
+Add the missing free operations after domain removal where appropriate.
+
+Fixes: e3beca48a45b ("irqdomain/treewide: Keep firmware node unconditionally allocated")
+Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>  # drivers/pci
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1595363169-7157-1-git-send-email-jonathan.derrick@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/pci/pci-xtalk-bridge.c    |    3 +++
+ arch/x86/kernel/apic/io_apic.c      |    5 +++++
+ drivers/iommu/intel_irq_remapping.c |    8 ++++++++
+ drivers/pci/controller/vmd.c        |    3 +++
+ 4 files changed, 19 insertions(+)
+
+--- a/arch/mips/pci/pci-xtalk-bridge.c
++++ b/arch/mips/pci/pci-xtalk-bridge.c
+@@ -539,6 +539,7 @@ err_free_resource:
+       pci_free_resource_list(&host->windows);
+ err_remove_domain:
+       irq_domain_remove(domain);
++      irq_domain_free_fwnode(fn);
+       return err;
+ }
+@@ -546,8 +547,10 @@ static int bridge_remove(struct platform
+ {
+       struct pci_bus *bus = platform_get_drvdata(pdev);
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
++      struct fwnode_handle *fn = bc->domain->fwnode;
+       irq_domain_remove(bc->domain);
++      irq_domain_free_fwnode(fn);
+       pci_lock_rescan_remove();
+       pci_stop_root_bus(bus);
+       pci_remove_root_bus(bus);
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2348,8 +2348,13 @@ static int mp_irqdomain_create(int ioapi
+ static void ioapic_destroy_irqdomain(int idx)
+ {
++      struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
++      struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
++
+       if (ioapics[idx].irqdomain) {
+               irq_domain_remove(ioapics[idx].irqdomain);
++              if (!cfg->dev)
++                      irq_domain_free_fwnode(fn);
+               ioapics[idx].irqdomain = NULL;
+       }
+ }
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -628,13 +628,21 @@ out_free_table:
+ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+ {
++      struct fwnode_handle *fn;
++
+       if (iommu && iommu->ir_table) {
+               if (iommu->ir_msi_domain) {
++                      fn = iommu->ir_msi_domain->fwnode;
++
+                       irq_domain_remove(iommu->ir_msi_domain);
++                      irq_domain_free_fwnode(fn);
+                       iommu->ir_msi_domain = NULL;
+               }
+               if (iommu->ir_domain) {
++                      fn = iommu->ir_domain->fwnode;
++
+                       irq_domain_remove(iommu->ir_domain);
++                      irq_domain_free_fwnode(fn);
+                       iommu->ir_domain = NULL;
+               }
+               free_pages((unsigned long)iommu->ir_table->base,
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -694,6 +694,7 @@ static int vmd_enable_domain(struct vmd_
+       if (!vmd->bus) {
+               pci_free_resource_list(&resources);
+               irq_domain_remove(vmd->irq_domain);
++              irq_domain_free_fwnode(fn);
+               return -ENODEV;
+       }
+@@ -808,6 +809,7 @@ static void vmd_cleanup_srcu(struct vmd_
+ static void vmd_remove(struct pci_dev *dev)
+ {
+       struct vmd_dev *vmd = pci_get_drvdata(dev);
++      struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+       sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+       pci_stop_root_bus(vmd->bus);
+@@ -816,6 +818,7 @@ static void vmd_remove(struct pci_dev *d
+       vmd_teardown_dma_ops(vmd);
+       vmd_detach_resources(vmd);
+       irq_domain_remove(vmd->irq_domain);
++      irq_domain_free_fwnode(fn);
+ }
+ #ifdef CONFIG_PM_SLEEP
diff --git a/queue-5.4/mtd-rawnand-qcom-avoid-write-to-unavailable-register.patch b/queue-5.4/mtd-rawnand-qcom-avoid-write-to-unavailable-register.patch
new file mode 100644 (file)
index 0000000..62d1a30
--- /dev/null
@@ -0,0 +1,68 @@
+From 443440cc4a901af462239d286cd10721aa1c7dfc Mon Sep 17 00:00:00 2001
+From: Sivaprakash Murugesan <sivaprak@codeaurora.org>
+Date: Fri, 12 Jun 2020 13:28:15 +0530
+Subject: mtd: rawnand: qcom: avoid write to unavailable register
+
+From: Sivaprakash Murugesan <sivaprak@codeaurora.org>
+
+commit 443440cc4a901af462239d286cd10721aa1c7dfc upstream.
+
+SFLASHC_BURST_CFG is only available on older ipq NAND platforms, this
+register has been removed when the NAND controller got implemented in
+the qpic controller.
+
+Avoid writing this register on devices which are based on qpic NAND
+controller.
+
+Fixes: dce84760b09f ("mtd: nand: qcom: Support for IPQ8074 QPIC NAND controller")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sivaprakash Murugesan <sivaprak@codeaurora.org>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/1591948696-16015-2-git-send-email-sivaprak@codeaurora.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/raw/qcom_nandc.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -459,11 +459,13 @@ struct qcom_nand_host {
+  * among different NAND controllers.
+  * @ecc_modes - ecc mode for NAND
+  * @is_bam - whether NAND controller is using BAM
++ * @is_qpic - whether NAND CTRL is part of qpic IP
+  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+  */
+ struct qcom_nandc_props {
+       u32 ecc_modes;
+       bool is_bam;
++      bool is_qpic;
+       u32 dev_cmd_reg_start;
+ };
+@@ -2751,7 +2753,8 @@ static int qcom_nandc_setup(struct qcom_
+       u32 nand_ctrl;
+       /* kill onenand */
+-      nandc_write(nandc, SFLASHC_BURST_CFG, 0);
++      if (!nandc->props->is_qpic)
++              nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+       nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+                   NAND_DEV_CMD_VLD_VAL);
+@@ -3007,12 +3010,14 @@ static const struct qcom_nandc_props ipq
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+       .is_bam = true,
++      .is_qpic = true,
+       .dev_cmd_reg_start = 0x0,
+ };
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+       .is_bam = true,
++      .is_qpic = true,
+       .dev_cmd_reg_start = 0x7000,
+ };
diff --git a/queue-5.4/parisc-do-not-use-an-ordered-store-in-pa_tlb_lock.patch b/queue-5.4/parisc-do-not-use-an-ordered-store-in-pa_tlb_lock.patch
new file mode 100644 (file)
index 0000000..36c36da
--- /dev/null
@@ -0,0 +1,47 @@
+From e72b23dec1da5e62a0090c5da1d926778284e230 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Tue, 28 Jul 2020 19:13:20 +0200
+Subject: parisc: Do not use an ordered store in pa_tlb_lock()
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit e72b23dec1da5e62a0090c5da1d926778284e230 upstream.
+
+No need to use an ordered store in pa_tlb_lock() and update the comment
+regarding usage of the sid register to unlock a spinlock in
+tlb_unlock0().
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/entry.S |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -455,7 +455,7 @@
+       LDREG           0(\ptp),\pte
+       bb,<,n          \pte,_PAGE_PRESENT_BIT,3f
+       b               \fault
+-      stw,ma          \spc,0(\tmp)
++      stw             \spc,0(\tmp)
+ 99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ 2:    LDREG           0(\ptp),\pte
+@@ -463,7 +463,12 @@
+ 3:
+       .endm
+-      /* Release pa_tlb_lock lock without reloading lock address. */
++      /* Release pa_tlb_lock lock without reloading lock address.
++         Note that the values in the register spc are limited to
++         NR_SPACE_IDS (262144). Thus, the stw instruction always
++         stores a nonzero value even when register spc is 64 bits.
++         We use an ordered store to ensure all prior accesses are
++         performed prior to releasing the lock. */
+       .macro          tlb_unlock0     spc,tmp
+ #ifdef CONFIG_SMP
+ 98:   or,COND(=)      %r0,\spc,%r0
diff --git a/queue-5.4/parisc-implement-__smp_store_release-and-__smp_load_acquire-barriers.patch b/queue-5.4/parisc-implement-__smp_store_release-and-__smp_load_acquire-barriers.patch
new file mode 100644 (file)
index 0000000..8cbcfec
--- /dev/null
@@ -0,0 +1,92 @@
+From e96ebd589debd9a6a793608c4ec7019c38785dea Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Thu, 30 Jul 2020 08:59:12 -0400
+Subject: parisc: Implement __smp_store_release and __smp_load_acquire barriers
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit e96ebd589debd9a6a793608c4ec7019c38785dea upstream.
+
+This patch implements the __smp_store_release and __smp_load_acquire barriers
+using ordered stores and loads.  This avoids the sync instruction present in
+the generic implementation.
+
+Cc: <stable@vger.kernel.org> # 4.14+
+Signed-off-by: Dave Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/barrier.h |   61 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 61 insertions(+)
+
+--- a/arch/parisc/include/asm/barrier.h
++++ b/arch/parisc/include/asm/barrier.h
+@@ -26,6 +26,67 @@
+ #define __smp_rmb()   mb()
+ #define __smp_wmb()   mb()
++#define __smp_store_release(p, v)                                     \
++do {                                                                  \
++      typeof(p) __p = (p);                                            \
++        union { typeof(*p) __val; char __c[1]; } __u =                        \
++                { .__val = (__force typeof(*p)) (v) };                        \
++      compiletime_assert_atomic_type(*p);                             \
++      switch (sizeof(*p)) {                                           \
++      case 1:                                                         \
++              asm volatile("stb,ma %0,0(%1)"                          \
++                              : : "r"(*(__u8 *)__u.__c), "r"(__p)     \
++                              : "memory");                            \
++              break;                                                  \
++      case 2:                                                         \
++              asm volatile("sth,ma %0,0(%1)"                          \
++                              : : "r"(*(__u16 *)__u.__c), "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      case 4:                                                         \
++              asm volatile("stw,ma %0,0(%1)"                          \
++                              : : "r"(*(__u32 *)__u.__c), "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      case 8:                                                         \
++              if (IS_ENABLED(CONFIG_64BIT))                           \
++                      asm volatile("std,ma %0,0(%1)"                  \
++                              : : "r"(*(__u64 *)__u.__c), "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      }                                                               \
++} while (0)
++
++#define __smp_load_acquire(p)                                         \
++({                                                                    \
++      union { typeof(*p) __val; char __c[1]; } __u;                   \
++      typeof(p) __p = (p);                                            \
++      compiletime_assert_atomic_type(*p);                             \
++      switch (sizeof(*p)) {                                           \
++      case 1:                                                         \
++              asm volatile("ldb,ma 0(%1),%0"                          \
++                              : "=r"(*(__u8 *)__u.__c) : "r"(__p)     \
++                              : "memory");                            \
++              break;                                                  \
++      case 2:                                                         \
++              asm volatile("ldh,ma 0(%1),%0"                          \
++                              : "=r"(*(__u16 *)__u.__c) : "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      case 4:                                                         \
++              asm volatile("ldw,ma 0(%1),%0"                          \
++                              : "=r"(*(__u32 *)__u.__c) : "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      case 8:                                                         \
++              if (IS_ENABLED(CONFIG_64BIT))                           \
++                      asm volatile("ldd,ma 0(%1),%0"                  \
++                              : "=r"(*(__u64 *)__u.__c) : "r"(__p)    \
++                              : "memory");                            \
++              break;                                                  \
++      }                                                               \
++      __u.__val;                                                      \
++})
+ #include <asm-generic/barrier.h>
+ #endif /* !__ASSEMBLY__ */
diff --git a/queue-5.4/parisc-mask-out-enable-and-reserved-bits-from-sba-imask.patch b/queue-5.4/parisc-mask-out-enable-and-reserved-bits-from-sba-imask.patch
new file mode 100644 (file)
index 0000000..5074710
--- /dev/null
@@ -0,0 +1,33 @@
+From 5b24993c21cbf2de11aff077a48c5cb0505a0450 Mon Sep 17 00:00:00 2001
+From: Sven Schnelle <svens@stackframe.org>
+Date: Tue, 11 Aug 2020 18:19:19 +0200
+Subject: parisc: mask out enable and reserved bits from sba imask
+
+From: Sven Schnelle <svens@stackframe.org>
+
+commit 5b24993c21cbf2de11aff077a48c5cb0505a0450 upstream.
+
+When using kexec the SBA IOMMU IBASE might still have the RE
+bit set. This triggers a WARN_ON when trying to write back the
+IBASE register later, and it also makes some mask calculations fail.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sven Schnelle <svens@stackframe.org>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/parisc/sba_iommu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device
+       ** (one that doesn't overlap memory or LMMIO space) in the
+       ** IBASE and IMASK registers.
+       */
+-      ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
++      ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
+       iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
+       if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
diff --git a/queue-5.4/revert-parisc-drop-ldcw-barrier-in-cas-code-when-running-up.patch b/queue-5.4/revert-parisc-drop-ldcw-barrier-in-cas-code-when-running-up.patch
new file mode 100644 (file)
index 0000000..cf5cb38
--- /dev/null
@@ -0,0 +1,62 @@
+From 462fb756c7de1ffe5bc6099149136031c2d9c02a Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 28 Jul 2020 18:52:58 +0200
+Subject: Revert "parisc: Drop LDCW barrier in CAS code when running UP"
+
+From: Helge Deller <deller@gmx.de>
+
+commit 462fb756c7de1ffe5bc6099149136031c2d9c02a upstream.
+
+This reverts commit e6eb5fe9123f05dcbf339ae5c0b6d32fcc0685d5.
+We need to optimize it differently. A follow up patch will correct it.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v5.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/syscall.S |   12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -641,8 +641,7 @@ cas_action:
+ 2:    stw     %r24, 0(%r26)
+       /* Free lock */
+ #ifdef CONFIG_SMP
+-98:   LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+ #endif
+       stw     %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+@@ -659,8 +658,7 @@ cas_action:
+       /* Error occurred on load or store */
+       /* Free lock */
+ #ifdef CONFIG_SMP
+-98:   LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+ #endif
+       stw     %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+@@ -864,8 +862,7 @@ cas2_action:
+ cas2_end:
+       /* Free lock */
+ #ifdef CONFIG_SMP
+-98:   LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+ #endif
+       stw     %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+@@ -878,8 +875,7 @@ cas2_end:
+       /* Error occurred on load or store */
+       /* Free lock */
+ #ifdef CONFIG_SMP
+-98:   LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+ #endif
+       stw     %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
diff --git a/queue-5.4/revert-parisc-revert-release-spinlocks-using-ordered-store.patch b/queue-5.4/revert-parisc-revert-release-spinlocks-using-ordered-store.patch
new file mode 100644 (file)
index 0000000..139f818
--- /dev/null
@@ -0,0 +1,75 @@
+From 157e9afcc4fa25068b0e8743bc254a9b56010e13 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 28 Jul 2020 18:56:14 +0200
+Subject: Revert "parisc: Revert "Release spinlocks using ordered store""
+
+From: Helge Deller <deller@gmx.de>
+
+commit 157e9afcc4fa25068b0e8743bc254a9b56010e13 upstream.
+
+This reverts commit 86d4d068df573a8c2105554624796c086d6bec3d.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v5.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/spinlock.h |    4 ++--
+ arch/parisc/kernel/syscall.S       |   12 ++++--------
+ 2 files changed, 6 insertions(+), 10 deletions(-)
+
+--- a/arch/parisc/include/asm/spinlock.h
++++ b/arch/parisc/include/asm/spinlock.h
+@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch
+       volatile unsigned int *a;
+       a = __ldcw_align(x);
+-      mb();
+-      *a = 1;
++      /* Release with ordered store. */
++      __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+ }
+ static inline int arch_spin_trylock(arch_spinlock_t *x)
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -640,8 +640,7 @@ cas_action:
+       sub,<>  %r28, %r25, %r0
+ 2:    stw     %r24, 0(%r26)
+       /* Free lock */
+-      sync
+-      stw     %r20, 0(%sr2,%r20)
++      stw,ma  %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+       /* Clear thread register indicator */
+       stw     %r0, 4(%sr2,%r20)
+@@ -655,8 +654,7 @@ cas_action:
+ 3:            
+       /* Error occurred on load or store */
+       /* Free lock */
+-      sync
+-      stw     %r20, 0(%sr2,%r20)
++      stw,ma  %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+       stw     %r0, 4(%sr2,%r20)
+ #endif
+@@ -857,8 +855,7 @@ cas2_action:
+ cas2_end:
+       /* Free lock */
+-      sync
+-      stw     %r20, 0(%sr2,%r20)
++      stw,ma  %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
+       /* Return to userspace, set no error */
+@@ -868,8 +865,7 @@ cas2_end:
+ 22:
+       /* Error occurred on load or store */
+       /* Free lock */
+-      sync
+-      stw     %r20, 0(%sr2,%r20)
++      stw,ma  %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
+       ldo     1(%r0),%r28
+       b       lws_exit
diff --git a/queue-5.4/revert-parisc-use-ldcw-instruction-for-smp-spinlock-release-barrier.patch b/queue-5.4/revert-parisc-use-ldcw-instruction-for-smp-spinlock-release-barrier.patch
new file mode 100644 (file)
index 0000000..166c18a
--- /dev/null
@@ -0,0 +1,258 @@
+From 6e9f06ee6c9566f3606d93182ac8f803a148504b Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Tue, 28 Jul 2020 18:54:40 +0200
+Subject: Revert "parisc: Use ldcw instruction for SMP spinlock release barrier"
+
+From: Helge Deller <deller@gmx.de>
+
+commit 6e9f06ee6c9566f3606d93182ac8f803a148504b upstream.
+
+This reverts commit 9e5c602186a692a7e848c0da17aed40f49d30519.
+No need to use the ldcw instruction as SMP spinlock release barrier.
+Revert it to gain back speed again.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: <stable@vger.kernel.org> # v5.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/spinlock.h |    4 ---
+ arch/parisc/kernel/entry.S         |   43 +++++++++++++++++--------------------
+ arch/parisc/kernel/syscall.S       |   16 +++----------
+ 3 files changed, 24 insertions(+), 39 deletions(-)
+
+--- a/arch/parisc/include/asm/spinlock.h
++++ b/arch/parisc/include/asm/spinlock.h
+@@ -37,11 +37,7 @@ static inline void arch_spin_unlock(arch
+       volatile unsigned int *a;
+       a = __ldcw_align(x);
+-#ifdef CONFIG_SMP
+-      (void) __ldcw(a);
+-#else
+       mb();
+-#endif
+       *a = 1;
+ }
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -454,9 +454,8 @@
+       nop
+       LDREG           0(\ptp),\pte
+       bb,<,n          \pte,_PAGE_PRESENT_BIT,3f
+-      LDCW            0(\tmp),\tmp1
+       b               \fault
+-      stw             \spc,0(\tmp)
++      stw,ma          \spc,0(\tmp)
+ 99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ 2:    LDREG           0(\ptp),\pte
+@@ -465,22 +464,20 @@
+       .endm
+       /* Release pa_tlb_lock lock without reloading lock address. */
+-      .macro          tlb_unlock0     spc,tmp,tmp1
++      .macro          tlb_unlock0     spc,tmp
+ #ifdef CONFIG_SMP
+ 98:   or,COND(=)      %r0,\spc,%r0
+-      LDCW            0(\tmp),\tmp1
+-      or,COND(=)      %r0,\spc,%r0
+-      stw             \spc,0(\tmp)
++      stw,ma          \spc,0(\tmp)
+ 99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+       .endm
+       /* Release pa_tlb_lock lock. */
+-      .macro          tlb_unlock1     spc,tmp,tmp1
++      .macro          tlb_unlock1     spc,tmp
+ #ifdef CONFIG_SMP
+ 98:   load_pa_tlb_lock \tmp
+ 99:   ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-      tlb_unlock0     \spc,\tmp,\tmp1
++      tlb_unlock0     \spc,\tmp
+ #endif
+       .endm
+@@ -1163,7 +1160,7 @@ dtlb_miss_20w:
+       
+       idtlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1189,7 +1186,7 @@ nadtlb_miss_20w:
+       idtlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1223,7 +1220,7 @@ dtlb_miss_11:
+       mtsp            t1, %sr1        /* Restore sr1 */
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1256,7 +1253,7 @@ nadtlb_miss_11:
+       mtsp            t1, %sr1        /* Restore sr1 */
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1285,7 +1282,7 @@ dtlb_miss_20:
+       idtlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1313,7 +1310,7 @@ nadtlb_miss_20:
+       
+       idtlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1420,7 +1417,7 @@ itlb_miss_20w:
+       
+       iitlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1444,7 +1441,7 @@ naitlb_miss_20w:
+       iitlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1478,7 +1475,7 @@ itlb_miss_11:
+       mtsp            t1, %sr1        /* Restore sr1 */
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1502,7 +1499,7 @@ naitlb_miss_11:
+       mtsp            t1, %sr1        /* Restore sr1 */
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1532,7 +1529,7 @@ itlb_miss_20:
+       iitlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1552,7 +1549,7 @@ naitlb_miss_20:
+       iitlbt          pte,prot
+-      tlb_unlock1     spc,t0,t1
++      tlb_unlock1     spc,t0
+       rfir
+       nop
+@@ -1582,7 +1579,7 @@ dbit_trap_20w:
+               
+       idtlbt          pte,prot
+-      tlb_unlock0     spc,t0,t1
++      tlb_unlock0     spc,t0
+       rfir
+       nop
+ #else
+@@ -1608,7 +1605,7 @@ dbit_trap_11:
+       mtsp            t1, %sr1     /* Restore sr1 */
+-      tlb_unlock0     spc,t0,t1
++      tlb_unlock0     spc,t0
+       rfir
+       nop
+@@ -1628,7 +1625,7 @@ dbit_trap_20:
+       
+       idtlbt          pte,prot
+-      tlb_unlock0     spc,t0,t1
++      tlb_unlock0     spc,t0
+       rfir
+       nop
+ #endif
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -640,9 +640,7 @@ cas_action:
+       sub,<>  %r28, %r25, %r0
+ 2:    stw     %r24, 0(%r26)
+       /* Free lock */
+-#ifdef CONFIG_SMP
+-      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-#endif
++      sync
+       stw     %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+       /* Clear thread register indicator */
+@@ -657,9 +655,7 @@ cas_action:
+ 3:            
+       /* Error occurred on load or store */
+       /* Free lock */
+-#ifdef CONFIG_SMP
+-      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-#endif
++      sync
+       stw     %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+       stw     %r0, 4(%sr2,%r20)
+@@ -861,9 +857,7 @@ cas2_action:
+ cas2_end:
+       /* Free lock */
+-#ifdef CONFIG_SMP
+-      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-#endif
++      sync
+       stw     %r20, 0(%sr2,%r20)
+       /* Enable interrupts */
+       ssm     PSW_SM_I, %r0
+@@ -874,9 +868,7 @@ cas2_end:
+ 22:
+       /* Error occurred on load or store */
+       /* Free lock */
+-#ifdef CONFIG_SMP
+-      LDCW    0(%sr2,%r20), %r1                       /* Barrier */
+-#endif
++      sync
+       stw     %r20, 0(%sr2,%r20)
+       ssm     PSW_SM_I, %r0
+       ldo     1(%r0),%r28
index 6be6d94bdd93755e7718841f7d9ca2f7076d54d6..b3a9866dc61524f23acd14f7e392dd7dec538069 100644 (file)
@@ -239,6 +239,18 @@ nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch
 nfs-don-t-return-layout-segments-that-are-in-use.patch
 cpufreq-fix-locking-issues-with-governors.patch
 cpufreq-dt-fix-oops-on-armada37xx.patch
+include-asm-generic-vmlinux.lds.h-align-ro_after_init.patch
+spi-spidev-align-buffers-for-dma.patch
+mtd-rawnand-qcom-avoid-write-to-unavailable-register.patch
+erofs-fix-extended-inode-could-cross-boundary.patch
+revert-parisc-drop-ldcw-barrier-in-cas-code-when-running-up.patch
+revert-parisc-use-ldcw-instruction-for-smp-spinlock-release-barrier.patch
+revert-parisc-revert-release-spinlocks-using-ordered-store.patch
+parisc-do-not-use-an-ordered-store-in-pa_tlb_lock.patch
+parisc-implement-__smp_store_release-and-__smp_load_acquire-barriers.patch
+parisc-mask-out-enable-and-reserved-bits-from-sba-imask.patch
+arm-8992-1-fix-unwind_frame-for-clang-built-kernels.patch
+irqdomain-treewide-free-firmware-node-after-domain-removal.patch
 alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch
 tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch
 pstore-fix-linking-when-crypto-api-disabled.patch
diff --git a/queue-5.4/spi-spidev-align-buffers-for-dma.patch b/queue-5.4/spi-spidev-align-buffers-for-dma.patch
new file mode 100644 (file)
index 0000000..cd29916
--- /dev/null
@@ -0,0 +1,94 @@
+From aa9e862d7d5bcecd4dca9f39e8b684b93dd84ee7 Mon Sep 17 00:00:00 2001
+From: Christian Eggers <ceggers@arri.de>
+Date: Tue, 28 Jul 2020 12:08:32 +0200
+Subject: spi: spidev: Align buffers for DMA
+
+From: Christian Eggers <ceggers@arri.de>
+
+commit aa9e862d7d5bcecd4dca9f39e8b684b93dd84ee7 upstream.
+
+Simply copying all xfers from userspace into one bounce buffer causes
+alignment problems if the SPI controller uses DMA.
+
+Ensure that all transfer data blocks within the rx and tx bounce buffers
+are aligned for DMA (according to ARCH_KMALLOC_MINALIGN).
+
+Alignment may increase the usage of the bounce buffers. In some cases,
+the buffers may need to be increased using the "bufsiz" module
+parameter.
+
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200728100832.24788-1-ceggers@arri.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spidev.c |   21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -223,6 +223,11 @@ static int spidev_message(struct spidev_
+       for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+                       n;
+                       n--, k_tmp++, u_tmp++) {
++              /* Ensure that also following allocations from rx_buf/tx_buf will meet
++               * DMA alignment requirements.
++               */
++              unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
++
+               k_tmp->len = u_tmp->len;
+               total += k_tmp->len;
+@@ -238,17 +243,17 @@ static int spidev_message(struct spidev_
+               if (u_tmp->rx_buf) {
+                       /* this transfer needs space in RX bounce buffer */
+-                      rx_total += k_tmp->len;
++                      rx_total += len_aligned;
+                       if (rx_total > bufsiz) {
+                               status = -EMSGSIZE;
+                               goto done;
+                       }
+                       k_tmp->rx_buf = rx_buf;
+-                      rx_buf += k_tmp->len;
++                      rx_buf += len_aligned;
+               }
+               if (u_tmp->tx_buf) {
+                       /* this transfer needs space in TX bounce buffer */
+-                      tx_total += k_tmp->len;
++                      tx_total += len_aligned;
+                       if (tx_total > bufsiz) {
+                               status = -EMSGSIZE;
+                               goto done;
+@@ -258,7 +263,7 @@ static int spidev_message(struct spidev_
+                                               (uintptr_t) u_tmp->tx_buf,
+                                       u_tmp->len))
+                               goto done;
+-                      tx_buf += k_tmp->len;
++                      tx_buf += len_aligned;
+               }
+               k_tmp->cs_change = !!u_tmp->cs_change;
+@@ -290,16 +295,16 @@ static int spidev_message(struct spidev_
+               goto done;
+       /* copy any rx data out of bounce buffer */
+-      rx_buf = spidev->rx_buffer;
+-      for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
++      for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
++                      n;
++                      n--, k_tmp++, u_tmp++) {
+               if (u_tmp->rx_buf) {
+                       if (copy_to_user((u8 __user *)
+-                                      (uintptr_t) u_tmp->rx_buf, rx_buf,
++                                      (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
+                                       u_tmp->len)) {
+                               status = -EFAULT;
+                               goto done;
+                       }
+-                      rx_buf += u_tmp->len;
+               }
+       }
+       status = total;