--- /dev/null
+From c7df4a1ecb8579838ec8c56b2bb6a6716e974f37 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Mon, 11 Nov 2019 22:18:13 -0500
+Subject: ext4: work around deleting a file with i_nlink == 0 safely
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit c7df4a1ecb8579838ec8c56b2bb6a6716e974f37 upstream.
+
+If the file system is corrupted such that a file's i_links_count is
+too small, then it's possible that when unlinking that file, i_nlink
+will already be zero. Previously we were working around this kind of
+corruption by forcing i_nlink to one; but we were doing this before
+trying to delete the directory entry --- and if the file system is
+corrupted enough that ext4_delete_entry() fails, then we exit with
+i_nlink elevated, and this causes the orphan inode list handling to be
+FUBAR'ed, such that when we unmount the file system, the orphan inode
+list can get corrupted.
+
+A better way to fix this is to simply skip trying to call drop_nlink()
+if i_nlink is already zero, thus moving the check to the place where
+it makes the most sense.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=205433
+
+Link: https://lore.kernel.org/r/20191112032903.8828-1-tytso@mit.edu
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3182,18 +3182,17 @@ static int ext4_unlink(struct inode *dir
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
+
+- if (inode->i_nlink == 0) {
+- ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
+- dentry->d_name.len, dentry->d_name.name);
+- set_nlink(inode, 1);
+- }
+ retval = ext4_delete_entry(handle, dir, de, bh);
+ if (retval)
+ goto end_unlink;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ ext4_update_dx_flag(dir);
+ ext4_mark_inode_dirty(handle, dir);
+- drop_nlink(inode);
++ if (inode->i_nlink == 0)
++ ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
++ dentry->d_name.len, dentry->d_name.name);
++ else
++ drop_nlink(inode);
+ if (!inode->i_nlink)
+ ext4_orphan_add(handle, inode);
+ inode->i_ctime = current_time(inode);
--- /dev/null
+From ff34f3cce278a0982a7b66b1afaed6295141b1fc Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Mon, 4 Nov 2019 15:58:15 +0000
+Subject: firmware: qcom: scm: Ensure 'a0' status code is treated as signed
+
+From: Will Deacon <will@kernel.org>
+
+commit ff34f3cce278a0982a7b66b1afaed6295141b1fc upstream.
+
+The 'a0' member of 'struct arm_smccc_res' is declared as 'unsigned long',
+however the Qualcomm SCM firmware interface driver expects to receive
+negative error codes via this field, so ensure that it's cast to 'long'
+before comparing to see if it is less than 0.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/qcom_scm-64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/firmware/qcom_scm-64.c
++++ b/drivers/firmware/qcom_scm-64.c
+@@ -150,7 +150,7 @@ static int qcom_scm_call(struct device *
+ kfree(args_virt);
+ }
+
+- if (res->a0 < 0)
++ if ((long)res->a0 < 0)
+ return qcom_scm_remap_error(res->a0);
+
+ return 0;
--- /dev/null
+From a9f2f6865d784477e1c7b59269d3a384abafd9ca Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Tue, 19 Nov 2019 12:30:53 +0100
+Subject: s390/kaslr: store KASLR offset for early dumps
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit a9f2f6865d784477e1c7b59269d3a384abafd9ca upstream.
+
+The KASLR offset is added to vmcoreinfo in arch_crash_save_vmcoreinfo(),
+so that it can be found by crash when processing kernel dumps.
+
+However, arch_crash_save_vmcoreinfo() is called during a subsys_initcall,
+so if the kernel crashes before that, we have no vmcoreinfo and no KASLR
+offset.
+
+Fix this by storing the KASLR offset in the lowcore, where the vmcore_info
+pointer will be stored, and where it can be found by crash. In order to
+make it distinguishable from a real vmcore_info pointer, mark it as uneven
+(KASLR offset itself is aligned to THREAD_SIZE).
+
+When arch_crash_save_vmcoreinfo() stores the real vmcore_info pointer in
+the lowcore, it overwrites the KASLR offset. At that point, the KASLR
+offset is not yet added to vmcoreinfo, so we also need to move the
+mem_assign_absolute() behind the vmcoreinfo_append_str().
+
+Fixes: b2d24b97b2a9 ("s390/kernel: add support for kernel address space layout randomization (KASLR)")
+Cc: <stable@vger.kernel.org> # v5.2+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/boot/startup.c | 5 +++++
+ arch/s390/kernel/machine_kexec.c | 2 +-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -164,6 +164,11 @@ void startup_kernel(void)
+ handle_relocs(__kaslr_offset);
+
+ if (__kaslr_offset) {
++ /*
++ * Save KASLR offset for early dumps, before vmcore_info is set.
++ * Mark as uneven to distinguish from real vmcore_info pointer.
++ */
++ S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
+ /* Clear non-relocated kernel */
+ if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
+ memset(img, 0, vmlinux.image_size);
+--- a/arch/s390/kernel/machine_kexec.c
++++ b/arch/s390/kernel/machine_kexec.c
+@@ -254,10 +254,10 @@ void arch_crash_save_vmcoreinfo(void)
+ VMCOREINFO_SYMBOL(lowcore_ptr);
+ VMCOREINFO_SYMBOL(high_memory);
+ VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
+- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+ vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
+ vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
++ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+ }
+
+ void machine_shutdown(void)
--- /dev/null
+From a2308c11ecbc3471ebb7435ee8075815b1502ef0 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 18 Nov 2019 13:09:52 +0100
+Subject: s390/smp,vdso: fix ASCE handling
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit a2308c11ecbc3471ebb7435ee8075815b1502ef0 upstream.
+
+When a secondary CPU is brought up it must initialize its control
+registers. CPU A which triggers that a secondary CPU B is brought up
+stores its control register contents into the lowcore of new CPU B,
+which then loads these values on startup.
+
+This is problematic in various ways: the control register which
+contains the home space ASCE will correctly contain the kernel ASCE;
+however control registers for primary and secondary ASCEs are
+initialized with whatever values were present in CPU A.
+
+Typically:
+- the primary ASCE will contain the user process ASCE of the process
+ that triggered onlining of CPU B.
+- the secondary ASCE will contain the percpu VDSO ASCE of CPU A.
+
+Due to lazy ASCE handling we may also end up with other combinations.
+
+When then CPU B switches to a different process (!= idle) it will
+fixup the primary ASCE. However the problem is that the (wrong) ASCE
+from CPU A was loaded into control register 1: as soon as an ASCE is
+attached (aka loaded) a CPU is free to generate TLB entries using that
+address space.
+Even though it is very unlikely that CPU B will actually generate such
+entries, this could result in TLB entries of the address space of the
+process that ran on CPU A. These entries shouldn't exist at all and
+could cause problems later on.
+
+Furthermore the secondary ASCE of CPU B will not be updated correctly.
+This means that processes may see wrong results or even crash if they
+access VDSO data on CPU B. The correct VDSO ASCE will eventually be
+loaded on return to user space as soon as the kernel executed a call
+to strnlen_user or an atomic futex operation on CPU B.
+
+Fix both issues by initializing the to be loaded control register
+contents with the correct ASCEs and also enforce (re-)loading of the
+ASCEs upon first context switch and return to user space.
+
+Fixes: 0aaba41b58bc ("s390: remove all code using the access register mode")
+Cc: stable@vger.kernel.org # v4.15+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/smp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -262,10 +262,13 @@ static void pcpu_prepare_secondary(struc
+ lc->spinlock_index = 0;
+ lc->percpu_offset = __per_cpu_offset[cpu];
+ lc->kernel_asce = S390_lowcore.kernel_asce;
++ lc->user_asce = S390_lowcore.kernel_asce;
+ lc->machine_flags = S390_lowcore.machine_flags;
+ lc->user_timer = lc->system_timer =
+ lc->steal_timer = lc->avg_steal_timer = 0;
+ __ctl_store(lc->cregs_save_area, 0, 15);
++ lc->cregs_save_area[1] = lc->kernel_asce;
++ lc->cregs_save_area[7] = lc->vdso_asce;
+ save_access_regs((unsigned int *) lc->access_regs_save_area);
+ memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+ sizeof(lc->stfle_fac_list));
+@@ -816,6 +819,8 @@ static void smp_init_secondary(void)
+
+ S390_lowcore.last_update_clock = get_tod_clock();
+ restore_access_regs(S390_lowcore.access_regs_save_area);
++ set_cpu_flag(CIF_ASCE_PRIMARY);
++ set_cpu_flag(CIF_ASCE_SECONDARY);
+ cpu_init();
+ preempt_disable();
+ init_cpu_timer();
mfd-rk808-fix-rk818-id-template.patch
mm-memory.c-fix-a-huge-pud-insertion-race-during-faulting.patch
mm-memcg-slab-wait-for-root-kmem_cache-refcnt-killing-on-root-kmem_cache-destruction.patch
+ext4-work-around-deleting-a-file-with-i_nlink-0-safely.patch
+firmware-qcom-scm-ensure-a0-status-code-is-treated-as-signed.patch
+s390-smp-vdso-fix-asce-handling.patch
+s390-kaslr-store-kaslr-offset-for-early-dumps.patch