--- /dev/null
+From 5012284700775a4e6e3fbe7eac4c543c4874b559 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sat, 28 Jul 2018 08:12:04 -0400
+Subject: ext4: fix check to prevent initializing reserved inodes
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 5012284700775a4e6e3fbe7eac4c543c4874b559 upstream.
+
+Commit 8844618d8aa7: "ext4: only look at the bg_flags field if it is
+valid" will complain if block group zero does not have the
+EXT4_BG_INODE_ZEROED flag set. Unfortunately, this is not correct,
+since a freshly created file system has this flag cleared. It gets set
+almost immediately after the file system is mounted read-write --- but
+the following somewhat unlikely sequence will end up triggering a
+false positive report of a corrupted file system:
+
+ mkfs.ext4 /dev/vdc
+ mount -o ro /dev/vdc /vdc
+ mount -o remount,rw /dev/vdc
+
+Instead, when initializing the inode table for block group zero, test
+to make sure that the itable_unused count is not too large, since that is
+the case that will result in some or all of the reserved inodes
+getting cleared.
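+
+As a rough sketch of the intent (the actual check added by this patch is
+in the ialloc.c hunk below), the reserved inodes all live in block group
+zero, so the number of in-use inodes there, as implied by itable_unused,
+should never be smaller than EXT4_FIRST_INO(sb):
+
+    /* illustrative only, not the patch itself */
+    unsigned int used = EXT4_INODES_PER_GROUP(sb) -
+                        ext4_itable_unused_count(sb, gdp);
+    if (group == 0 && used < EXT4_FIRST_INO(sb))
+        ext4_error(sb, "itable unused count too large in group 0");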
+
+This fixes the failures reported by Eric Whitney when running
+generic/230 and generic/231 in the nojournal test case.
+
+Fixes: 8844618d8aa7 ("ext4: only look at the bg_flags field if it is valid")
+Reported-by: Eric Whitney <enwlinux@gmail.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ialloc.c | 5 ++++-
+ fs/ext4/super.c | 8 +-------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1316,7 +1316,10 @@ int ext4_init_inode_table(struct super_b
+ ext4_itable_unused_count(sb, gdp)),
+ sbi->s_inodes_per_block);
+
+- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
++ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
++ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
++ ext4_itable_unused_count(sb, gdp)) <
++ EXT4_FIRST_INO(sb)))) {
+ ext4_error(sb, "Something is wrong with group %u: "
+ "used itable blocks: %d; "
+ "itable unused count: %u",
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3031,14 +3031,8 @@ static ext4_group_t ext4_has_uninit_itab
+ if (!gdp)
+ continue;
+
+- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+- continue;
+- if (group != 0)
++ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ break;
+- ext4_error(sb, "Inode table for bg 0 marked as "
+- "needing zeroing");
+- if (sb->s_flags & MS_RDONLY)
+- return ngroups;
+ }
+
+ return group;
--- /dev/null
+From fedb8da96355f5f64353625bf96dc69423ad1826 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sun, 5 Aug 2018 13:30:31 -0400
+Subject: parisc: Define mb() and add memory barriers to assembler unlock sequences
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit fedb8da96355f5f64353625bf96dc69423ad1826 upstream.
+
+For years I thought all parisc machines executed loads and stores in
+order. However, Jeff Law recently indicated on gcc-patches that this is
+not correct. There are various degrees of out-of-order execution all the
+way back to the PA7xxx processor series (hit-under-miss). The PA8xxx
+series has full out-of-order execution for both integer operations and
+loads and stores.
+
+This is described in the following article:
+http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml
+
+For this reason, we need to define mb() and to insert a memory barrier
+before the store that unlocks spinlocks. This ensures that all memory
+accesses are complete prior to unlocking. The ldcw instruction performs
+the same function on entry.
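+
+Conceptually (a simplified sketch, not the exact arch code), the unlock
+path now has to pair an explicit barrier with the plain store, while the
+lock path already gets its ordering from ldcw:
+
+    /* illustrative only */
+    static inline void example_unlock(volatile unsigned int *lock_word)
+    {
+            mb();             /* complete all prior loads/stores ...      */
+            *lock_word = 1;   /* ... before the store that drops the lock */
+    }
+
+In the assembler paths below the same thing is done by issuing a sync
+immediately before the unlocking stw.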
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/barrier.h | 32 ++++++++++++++++++++++++++++++++
+ arch/parisc/kernel/entry.S | 2 ++
+ arch/parisc/kernel/pacache.S | 1 +
+ arch/parisc/kernel/syscall.S | 4 ++++
+ 4 files changed, 39 insertions(+)
+
+--- /dev/null
++++ b/arch/parisc/include/asm/barrier.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_BARRIER_H
++#define __ASM_BARRIER_H
++
++#ifndef __ASSEMBLY__
++
++/* The synchronize caches instruction executes as a nop on systems in
++ which all memory references are performed in order. */
++#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
++
++#if defined(CONFIG_SMP)
++#define mb() do { synchronize_caches(); } while (0)
++#define rmb() mb()
++#define wmb() mb()
++#define dma_rmb() mb()
++#define dma_wmb() mb()
++#else
++#define mb() barrier()
++#define rmb() barrier()
++#define wmb() barrier()
++#define dma_rmb() barrier()
++#define dma_wmb() barrier()
++#endif
++
++#define __smp_mb() mb()
++#define __smp_rmb() mb()
++#define __smp_wmb() mb()
++
++#include <asm-generic/barrier.h>
++
++#endif /* !__ASSEMBLY__ */
++#endif /* __ASM_BARRIER_H */
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -482,6 +482,8 @@
+ .macro tlb_unlock0 spc,tmp
+ #ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
++ sync
++ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+ #endif
+ .endm
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -354,6 +354,7 @@ ENDPROC_CFI(flush_data_cache_local)
+ .macro tlb_unlock la,flags,tmp
+ #ifdef CONFIG_SMP
+ ldi 1,\tmp
++ sync
+ stw \tmp,0(\la)
+ mtsm \flags
+ #endif
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -633,6 +633,7 @@ cas_action:
+ sub,<> %r28, %r25, %r0
+ 2: stw,ma %r24, 0(%r26)
+ /* Free lock */
++ sync
+ stw,ma %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+@@ -647,6 +648,7 @@ cas_action:
+ 3:
+ /* Error occurred on load or store */
+ /* Free lock */
++ sync
+ stw %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ stw %r0, 4(%sr2,%r20)
+@@ -848,6 +850,7 @@ cas2_action:
+
+ cas2_end:
+ /* Free lock */
++ sync
+ stw,ma %r20, 0(%sr2,%r20)
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
+@@ -858,6 +861,7 @@ cas2_end:
+ 22:
+ /* Error occurred on load or store */
+ /* Free lock */
++ sync
+ stw %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ ldo 1(%r0),%r28
--- /dev/null
+From 66509a276c8c1d19ee3f661a41b418d101c57d29 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 28 Jul 2018 11:47:17 +0200
+Subject: parisc: Enable CONFIG_MLONGCALLS by default
+
+From: Helge Deller <deller@gmx.de>
+
+commit 66509a276c8c1d19ee3f661a41b418d101c57d29 upstream.
+
+Enable the -mlong-calls compiler option by default, because otherwise in most
+cases linking the vmlinux binary fails due to truncations of R_PARISC_PCREL22F
+relocations. This fixes building the 64-bit defconfig.
+
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -184,7 +184,7 @@ config PREFETCH
+
+ config MLONGCALLS
+ bool "Enable the -mlong-calls compiler option for big kernels"
+- def_bool y if (!MODULES)
++ default y
+ depends on PA8X00
+ help
+ If you configure the kernel to include many drivers built-in instead
--- /dev/null
+From 3ab2011ea368ec3433ad49e1b9e1c7b70d2e65df Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Tue, 22 May 2018 14:37:18 -0700
+Subject: tpm: fix race condition in tpm_common_write()
+
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+
+commit 3ab2011ea368ec3433ad49e1b9e1c7b70d2e65df upstream.
+
+There is a race condition in the tpm_common_write() function that allows
+two threads on the same /dev/tpm<N>, or two different applications on
+the same /dev/tpmrm<N>, to overwrite each other's commands/responses.
+Fix this by taking priv->buffer_mutex early in the function.
+
+Also convert priv->data_pending from an atomic to a regular size_t
+type. There is no need for it to be atomic since it is only touched
+under the protection of priv->buffer_mutex.
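+
+Schematically (simplified, not actual code), the old ordering allowed two
+writers to both pass the data_pending check and then clobber each other's
+command in data_buffer:
+
+    writer A                              writer B
+    --------                              --------
+    atomic_read(data_pending) == 0
+                                          atomic_read(data_pending) == 0
+    mutex_lock(); fill data_buffer;
+    mutex_unlock();
+                                          mutex_lock(); fill data_buffer;
+                                          (A's command is overwritten)
+
+Taking buffer_mutex before the data_pending check, as done below, closes
+that window.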
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm-dev.c | 43 ++++++++++++++++++++-----------------------
+ 1 file changed, 20 insertions(+), 23 deletions(-)
+
+--- a/drivers/char/tpm/tpm-dev.c
++++ b/drivers/char/tpm/tpm-dev.c
+@@ -25,7 +25,7 @@ struct file_priv {
+ struct tpm_chip *chip;
+
+ /* Data passed to and from the tpm via the read/write calls */
+- atomic_t data_pending;
++ size_t data_pending;
+ struct mutex buffer_mutex;
+
+ struct timer_list user_read_timer; /* user needs to claim result */
+@@ -46,7 +46,7 @@ static void timeout_work(struct work_str
+ struct file_priv *priv = container_of(work, struct file_priv, work);
+
+ mutex_lock(&priv->buffer_mutex);
+- atomic_set(&priv->data_pending, 0);
++ priv->data_pending = 0;
+ memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ mutex_unlock(&priv->buffer_mutex);
+ }
+@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode,
+ }
+
+ priv->chip = chip;
+- atomic_set(&priv->data_pending, 0);
+ mutex_init(&priv->buffer_mutex);
+ setup_timer(&priv->user_read_timer, user_reader_timeout,
+ (unsigned long)priv);
+@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *fil
+ size_t size, loff_t *off)
+ {
+ struct file_priv *priv = file->private_data;
+- ssize_t ret_size;
++ ssize_t ret_size = 0;
+ int rc;
+
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+- ret_size = atomic_read(&priv->data_pending);
+- if (ret_size > 0) { /* relay data */
+- ssize_t orig_ret_size = ret_size;
+- if (size < ret_size)
+- ret_size = size;
++ mutex_lock(&priv->buffer_mutex);
+
+- mutex_lock(&priv->buffer_mutex);
++ if (priv->data_pending) {
++ ret_size = min_t(ssize_t, size, priv->data_pending);
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+- memset(priv->data_buffer, 0, orig_ret_size);
++ memset(priv->data_buffer, 0, priv->data_pending);
+ if (rc)
+ ret_size = -EFAULT;
+
+- mutex_unlock(&priv->buffer_mutex);
++ priv->data_pending = 0;
+ }
+
+- atomic_set(&priv->data_pending, 0);
+-
++ mutex_unlock(&priv->buffer_mutex);
+ return ret_size;
+ }
+
+@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *fi
+ size_t in_size = size;
+ ssize_t out_size;
+
+- /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout.
+- This also prevents splitted buffered writes from blocking here.
+- */
+- if (atomic_read(&priv->data_pending) != 0)
+- return -EBUSY;
+-
+ if (in_size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
++ /* Cannot perform a write until the read has cleared either via
++ * tpm_read or a user_read_timer timeout. This also prevents split
++ * buffered writes from blocking here.
++ */
++ if (priv->data_pending != 0) {
++ mutex_unlock(&priv->buffer_mutex);
++ return -EBUSY;
++ }
++
+ if (copy_from_user
+ (priv->data_buffer, (void __user *) buf, in_size)) {
+ mutex_unlock(&priv->buffer_mutex);
+@@ -159,7 +156,7 @@ static ssize_t tpm_write(struct file *fi
+ return out_size;
+ }
+
+- atomic_set(&priv->data_pending, out_size);
++ priv->data_pending = out_size;
+ mutex_unlock(&priv->buffer_mutex);
+
+ /* Set a timeout by which the reader must come claim the result */
+@@ -178,7 +175,7 @@ static int tpm_release(struct inode *ino
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+ file->private_data = NULL;
+- atomic_set(&priv->data_pending, 0);
++ priv->data_pending = 0;
+ clear_bit(0, &priv->chip->is_open);
+ kfree(priv);
+ return 0;