From: Greg Kroah-Hartman
Date: Fri, 10 Aug 2018 14:20:13 +0000 (+0200)
Subject: 4.4-stable patches
X-Git-Tag: v4.18.1~54
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1cb778ba7b41efd4715c14f5fcb4806627dff924;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
	ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
	fork-unconditionally-clear-stack-on-fork.patch
	ipv4-ipv6-make-inet-_esp-select-crypto_echainiv.patch
	parisc-define-mb-and-add-memory-barriers-to-assembler-unlock-sequences.patch
	parisc-enable-config_mlongcalls-by-default.patch
	tpm-fix-race-condition-in-tpm_common_write.patch
---

diff --git a/queue-4.4/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch b/queue-4.4/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
new file mode 100644
index 00000000000..1f71bf29c14
--- /dev/null
+++ b/queue-4.4/ext4-fix-check-to-prevent-initializing-reserved-inodes.patch
@@ -0,0 +1,71 @@
+From 5012284700775a4e6e3fbe7eac4c543c4874b559 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o
+Date: Sat, 28 Jul 2018 08:12:04 -0400
+Subject: ext4: fix check to prevent initializing reserved inodes
+
+From: Theodore Ts'o
+
+commit 5012284700775a4e6e3fbe7eac4c543c4874b559 upstream.
+
+Commit 8844618d8aa7: "ext4: only look at the bg_flags field if it is
+valid" will complain if block group zero does not have the
+EXT4_BG_INODE_ZEROED flag set. Unfortunately, this is not correct,
+since a freshly created file system has this flag cleared. It gets
+set almost immediately after the file system is mounted read-write --- but
+the following somewhat unlikely sequence will end up triggering a
+false positive report of a corrupted file system:
+
+   mkfs.ext4 /dev/vdc
+   mount -o ro /dev/vdc /vdc
+   mount -o remount,rw /dev/vdc
+
+Instead, when initializing the inode table for block group zero, test
+to make sure that itable_unused count is not too large, since that is
+the case that will result in some or all of the reserved inodes
+getting cleared.
+
+This fixes the failures reported by Eric Whitney when running
+generic/230 and generic/231 in the nojournal test case.
+
+Fixes: 8844618d8aa7 ("ext4: only look at the bg_flags field if it is valid")
+Reported-by: Eric Whitney
+Signed-off-by: Theodore Ts'o
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/ialloc.c |    5 ++++-
+ fs/ext4/super.c  |    8 +-------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1308,7 +1308,10 @@ int ext4_init_inode_table(struct super_b
+ 			       ext4_itable_unused_count(sb, gdp)),
+ 			    sbi->s_inodes_per_block);
+ 
+-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
++	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
++	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
++			       ext4_itable_unused_count(sb, gdp)) <
++			      EXT4_FIRST_INO(sb)))) {
+ 		ext4_error(sb, "Something is wrong with group %u: "
+ 			   "used itable blocks: %d; "
+ 			   "itable unused count: %u",
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2875,14 +2875,8 @@ static ext4_group_t ext4_has_uninit_itab
+ 		if (!gdp)
+ 			continue;
+ 
+-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+-			continue;
+-		if (group != 0)
++		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ 			break;
+-		ext4_error(sb, "Inode table for bg 0 marked as "
+-			   "needing zeroing");
+-		if (sb->s_flags & MS_RDONLY)
+-			return ngroups;
+ 	}
+ 
+ 	return group;
diff --git a/queue-4.4/fork-unconditionally-clear-stack-on-fork.patch b/queue-4.4/fork-unconditionally-clear-stack-on-fork.patch
new file mode 100644
index 00000000000..667373d0431
--- /dev/null
+++ b/queue-4.4/fork-unconditionally-clear-stack-on-fork.patch
@@ -0,0 +1,86 @@
+From e01e80634ecdde1dd113ac43b3adad21b47f3957 Mon Sep 17 00:00:00 2001
+From: Kees Cook
+Date: Fri, 20 Apr 2018 14:55:31 -0700
+Subject: fork: unconditionally clear stack on fork
+
+From: Kees Cook
+
+commit e01e80634ecdde1dd113ac43b3adad21b47f3957 upstream.
+
+One of the classes of kernel stack content leaks[1] is exposing the
+contents of prior heap or stack contents when a new process stack is
+allocated. Normally, those stacks are not zeroed, and the old contents
+remain in place. In the face of stack content exposure flaws, those
+contents can leak to userspace.
+
+Fixing this will make the kernel no longer vulnerable to these flaws, as
+the stack will be wiped each time a stack is assigned to a new process.
+There's not a meaningful change in runtime performance; it almost looks
+like it provides a benefit.
+
+Performing back-to-back kernel builds before:
+	Run times: 157.86 157.09 158.90 160.94 160.80
+	Mean: 159.12
+	Std Dev: 1.54
+
+and after:
+	Run times: 159.31 157.34 156.71 158.15 160.81
+	Mean: 158.46
+	Std Dev: 1.46
+
+Instead of making this a build or runtime config, Andy Lutomirski
+recommended this just be enabled by default.
+
+[1] A noisy search for many kinds of stack content leaks can be seen here:
+https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=linux+kernel+stack+leak
+
+I did some more with perf and cycle counts on running 100,000 execs of
+/bin/true.
+
+before:
+Cycles: 218858861551 218853036130 214727610969 227656844122 224980542841
+Mean: 221015379122.60
+Std Dev: 4662486552.47
+
+after:
+Cycles: 213868945060 213119275204 211820169456 224426673259 225489986348
+Mean: 217745009865.40
+Std Dev: 5935559279.99
+
+It continues to look like it's faster, though the deviation is rather
+wide, but I'm not sure what I could do that would be less noisy. I'm
+open to ideas!
+
+Link: http://lkml.kernel.org/r/20180221021659.GA37073@beast
+Signed-off-by: Kees Cook
+Acked-by: Michal Hocko
+Reviewed-by: Andrew Morton
+Cc: Andy Lutomirski
+Cc: Laura Abbott
+Cc: Rasmus Villemoes
+Cc: Mel Gorman
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+[ Srivatsa: Backported to 4.4.y ]
+Signed-off-by: Srivatsa S. Bhat
+Reviewed-by: Srinidhi Rao
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/thread_info.h |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -55,11 +55,7 @@ extern long do_no_restart_syscall(struct
+ 
+ #ifdef __KERNEL__
+ 
+-#ifdef CONFIG_DEBUG_STACK_USAGE
+-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+-#else
+-# define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK)
+-#endif
++#define THREADINFO_GFP		(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+ 
+ /*
+  * flag set/clear/test wrappers
diff --git a/queue-4.4/ipv4-ipv6-make-inet-_esp-select-crypto_echainiv.patch b/queue-4.4/ipv4-ipv6-make-inet-_esp-select-crypto_echainiv.patch
new file mode 100644
index 00000000000..9a673ddef4a
--- /dev/null
+++ b/queue-4.4/ipv4-ipv6-make-inet-_esp-select-crypto_echainiv.patch
@@ -0,0 +1,49 @@
+From 32b6170ca59ccf07d0e394561e54b2cd9726038c Mon Sep 17 00:00:00 2001
+From: Thomas Egerer
+Date: Mon, 25 Jan 2016 12:58:44 +0100
+Subject: ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
+
+From: Thomas Egerer
+
+commit 32b6170ca59ccf07d0e394561e54b2cd9726038c upstream.
+
+The ESP algorithms using CBC mode require echainiv. Hence INET*_ESP have
+to select CRYPTO_ECHAINIV in order to work properly. This solves the
+issues caused by a misconfiguration as described in [1].
+The original approach, patching crypto/Kconfig was turned down by
+Herbert Xu [2].
+
+[1] https://lists.strongswan.org/pipermail/users/2015-December/009074.html
+[2] http://marc.info/?l=linux-crypto-vger&m=145224655809562&w=2
+
+Signed-off-by: Thomas Egerer
+Acked-by: Herbert Xu
+Signed-off-by: David S. Miller
+Cc: Yongqin Liu
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/ipv4/Kconfig |    1 +
+ net/ipv6/Kconfig |    1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -354,6 +354,7 @@ config INET_ESP
+ 	select CRYPTO_CBC
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_DES
++	select CRYPTO_ECHAINIV
+ 	---help---
+ 	  Support for IPsec ESP.
+ 
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -69,6 +69,7 @@ config INET6_ESP
+ 	select CRYPTO_CBC
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_DES
++	select CRYPTO_ECHAINIV
+ 	---help---
+ 	  Support for IPsec ESP.
+ 
diff --git a/queue-4.4/parisc-define-mb-and-add-memory-barriers-to-assembler-unlock-sequences.patch b/queue-4.4/parisc-define-mb-and-add-memory-barriers-to-assembler-unlock-sequences.patch
new file mode 100644
index 00000000000..f2fff8f6659
--- /dev/null
+++ b/queue-4.4/parisc-define-mb-and-add-memory-barriers-to-assembler-unlock-sequences.patch
@@ -0,0 +1,129 @@
+From fedb8da96355f5f64353625bf96dc69423ad1826 Mon Sep 17 00:00:00 2001
+From: John David Anglin
+Date: Sun, 5 Aug 2018 13:30:31 -0400
+Subject: parisc: Define mb() and add memory barriers to assembler unlock sequences
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John David Anglin
+
+commit fedb8da96355f5f64353625bf96dc69423ad1826 upstream.
+
+For years I thought all parisc machines executed loads and stores in
+order. However, Jeff Law recently indicated on gcc-patches that this is
+not correct.
+There are various degrees of out-of-order execution all the way back
+to the PA7xxx processor series (hit-under-miss). The PA8xxx series has
+full out-of-order execution for both integer operations, and loads and
+stores.
+
+This is described in the following article:
+http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml
+
+For this reason, we need to define mb() and to insert a memory barrier
+before the store unlocking spinlocks. This ensures that all memory
+accesses are complete prior to unlocking. The ldcw instruction performs
+the same function on entry.
+
+Signed-off-by: John David Anglin
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/parisc/include/asm/barrier.h |   32 ++++++++++++++++++++++++++++++++
+ arch/parisc/kernel/entry.S        |    2 ++
+ arch/parisc/kernel/pacache.S      |    1 +
+ arch/parisc/kernel/syscall.S      |    4 ++++
+ 4 files changed, 39 insertions(+)
+
+--- /dev/null
++++ b/arch/parisc/include/asm/barrier.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_BARRIER_H
++#define __ASM_BARRIER_H
++
++#ifndef __ASSEMBLY__
++
++/* The synchronize caches instruction executes as a nop on systems in
++   which all memory references are performed in order. */
++#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
++
++#if defined(CONFIG_SMP)
++#define mb()		do { synchronize_caches(); } while (0)
++#define rmb()		mb()
++#define wmb()		mb()
++#define dma_rmb()	mb()
++#define dma_wmb()	mb()
++#else
++#define mb()		barrier()
++#define rmb()		barrier()
++#define wmb()		barrier()
++#define dma_rmb()	barrier()
++#define dma_wmb()	barrier()
++#endif
++
++#define __smp_mb()	mb()
++#define __smp_rmb()	mb()
++#define __smp_wmb()	mb()
++
++#include <asm-generic/barrier.h>
++
++#endif /* !__ASSEMBLY__ */
++#endif /* __ASM_BARRIER_H */
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -482,6 +482,8 @@
+ 	.macro		tlb_unlock0	spc,tmp
+ #ifdef CONFIG_SMP
+ 	or,COND(=)	%r0,\spc,%r0
++	sync
++	or,COND(=)	%r0,\spc,%r0
+ 	stw		\spc,0(\tmp)
+ #endif
+ 	.endm
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
+ 	.macro	tlb_unlock	la,flags,tmp
+ #ifdef CONFIG_SMP
+ 	ldi		1,\tmp
++	sync
+ 	stw		\tmp,0(\la)
+ 	mtsm		\flags
+ #endif
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -631,6 +631,7 @@ cas_action:
+ 	sub,<>	%r28, %r25, %r0
+ 2:	stw,ma	%r24, 0(%r26)
+ 	/* Free lock */
++	sync
+ 	stw,ma	%r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ 	/* Clear thread register indicator */
+@@ -645,6 +646,7 @@ cas_action:
+ 3:
+ 	/* Error occurred on load or store */
+ 	/* Free lock */
++	sync
+ 	stw	%r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ 	stw	%r0, 4(%sr2,%r20)
+@@ -846,6 +848,7 @@ cas2_action:
+ 
+ cas2_end:
+ 	/* Free lock */
++	sync
+ 	stw,ma	%r20, 0(%sr2,%r20)
+ 	/* Enable interrupts */
+ 	ssm	PSW_SM_I, %r0
+@@ -856,6 +859,7 @@ cas2_end:
+ 22:
+ 	/* Error occurred on load or store */
+ 	/* Free lock */
++	sync
+ 	stw	%r20, 0(%sr2,%r20)
+ 	ssm	PSW_SM_I, %r0
+ 	ldo	1(%r0),%r28
diff --git a/queue-4.4/parisc-enable-config_mlongcalls-by-default.patch b/queue-4.4/parisc-enable-config_mlongcalls-by-default.patch
new file mode 100644
index 00000000000..55a8679b437
--- /dev/null
+++ b/queue-4.4/parisc-enable-config_mlongcalls-by-default.patch
@@ -0,0 +1,32 @@
+From 66509a276c8c1d19ee3f661a41b418d101c57d29 Mon Sep 17 00:00:00 2001
+From: Helge Deller
+Date: Sat, 28 Jul 2018 11:47:17 +0200
+Subject: parisc: Enable CONFIG_MLONGCALLS by default
+
+From: Helge Deller
+
+commit 66509a276c8c1d19ee3f661a41b418d101c57d29 upstream.
+
+Enable the -mlong-calls compiler option by default, because otherwise in most
+cases linking the vmlinux binary fails due to truncations of R_PARISC_PCREL22F
+relocations. This fixes building the 64-bit defconfig.
+
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/parisc/Kconfig |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -177,7 +177,7 @@ config PREFETCH
+ 
+ config MLONGCALLS
+ 	bool "Enable the -mlong-calls compiler option for big kernels"
+-	def_bool y if (!MODULES)
++	default y
+ 	depends on PA8X00
+ 	help
+ 	  If you configure the kernel to include many drivers built-in instead
diff --git a/queue-4.4/tpm-fix-race-condition-in-tpm_common_write.patch b/queue-4.4/tpm-fix-race-condition-in-tpm_common_write.patch
new file mode 100644
index 00000000000..eebe29d4704
--- /dev/null
+++ b/queue-4.4/tpm-fix-race-condition-in-tpm_common_write.patch
@@ -0,0 +1,139 @@
+From 3ab2011ea368ec3433ad49e1b9e1c7b70d2e65df Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk
+Date: Tue, 22 May 2018 14:37:18 -0700
+Subject: tpm: fix race condition in tpm_common_write()
+
+From: Tadeusz Struk
+
+commit 3ab2011ea368ec3433ad49e1b9e1c7b70d2e65df upstream.
+
+There is a race condition in tpm_common_write function allowing
+two threads on the same /dev/tpm, or two different applications
+on the same /dev/tpmrm to overwrite each other's commands/responses.
+Fixed this by taking the priv->buffer_mutex early in the function.
+
+Also converted the priv->data_pending from atomic to a regular size_t
+type. There is no need for it to be atomic since it is only touched
+under the protection of the priv->buffer_mutex.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Tadeusz Struk
+Reviewed-by: Jarkko Sakkinen
+Signed-off-by: Jarkko Sakkinen
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/char/tpm/tpm-dev.c |   43 ++++++++++++++++++++-----------------------
+ 1 file changed, 20 insertions(+), 23 deletions(-)
+
+--- a/drivers/char/tpm/tpm-dev.c
++++ b/drivers/char/tpm/tpm-dev.c
+@@ -25,7 +25,7 @@ struct file_priv {
+ 	struct tpm_chip *chip;
+ 
+ 	/* Data passed to and from the tpm via the read/write calls */
+-	atomic_t data_pending;
++	size_t data_pending;
+ 	struct mutex buffer_mutex;
+ 
+ 	struct timer_list user_read_timer;	/* user needs to claim result */
+@@ -46,7 +46,7 @@ static void timeout_work(struct work_str
+ 	struct file_priv *priv = container_of(work, struct file_priv, work);
+ 
+ 	mutex_lock(&priv->buffer_mutex);
+-	atomic_set(&priv->data_pending, 0);
++	priv->data_pending = 0;
+ 	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ 	mutex_unlock(&priv->buffer_mutex);
+ }
+@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode,
+ 	}
+ 
+ 	priv->chip = chip;
+-	atomic_set(&priv->data_pending, 0);
+ 	mutex_init(&priv->buffer_mutex);
+ 	setup_timer(&priv->user_read_timer, user_reader_timeout,
+ 			(unsigned long)priv);
+@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *fil
+ 			size_t size, loff_t *off)
+ {
+ 	struct file_priv *priv = file->private_data;
+-	ssize_t ret_size;
++	ssize_t ret_size = 0;
+ 	int rc;
+ 
+ 	del_singleshot_timer_sync(&priv->user_read_timer);
+ 	flush_work(&priv->work);
+-	ret_size = atomic_read(&priv->data_pending);
+-	if (ret_size > 0) {	/* relay data */
+-		ssize_t orig_ret_size = ret_size;
+-		if (size < ret_size)
+-			ret_size = size;
++	mutex_lock(&priv->buffer_mutex);
+ 
+-		mutex_lock(&priv->buffer_mutex);
++	if (priv->data_pending) {
++		ret_size = min_t(ssize_t, size, priv->data_pending);
+ 		rc = copy_to_user(buf, priv->data_buffer, ret_size);
+-		memset(priv->data_buffer, 0, orig_ret_size);
++		memset(priv->data_buffer, 0, priv->data_pending);
+ 		if (rc)
+ 			ret_size = -EFAULT;
+ 
+-		mutex_unlock(&priv->buffer_mutex);
++		priv->data_pending = 0;
+ 	}
+ 
+-	atomic_set(&priv->data_pending, 0);
+-
++	mutex_unlock(&priv->buffer_mutex);
+ 	return ret_size;
+ }
+ 
+@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *fi
+ 	size_t in_size = size;
+ 	ssize_t out_size;
+ 
+-	/* cannot perform a write until the read has cleared
+-	   either via tpm_read or a user_read_timer timeout.
+-	   This also prevents splitted buffered writes from blocking here.
+-	*/
+-	if (atomic_read(&priv->data_pending) != 0)
+-		return -EBUSY;
+-
+ 	if (in_size > TPM_BUFSIZE)
+ 		return -E2BIG;
+ 
+ 	mutex_lock(&priv->buffer_mutex);
+ 
++	/* Cannot perform a write until the read has cleared either via
++	 * tpm_read or a user_read_timer timeout. This also prevents split
++	 * buffered writes from blocking here.
++	 */
++	if (priv->data_pending != 0) {
++		mutex_unlock(&priv->buffer_mutex);
++		return -EBUSY;
++	}
++
+ 	if (copy_from_user
+ 	    (priv->data_buffer, (void __user *) buf, in_size)) {
+ 		mutex_unlock(&priv->buffer_mutex);
+@@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *fi
+ 		return out_size;
+ 	}
+ 
+-	atomic_set(&priv->data_pending, out_size);
++	priv->data_pending = out_size;
+ 	mutex_unlock(&priv->buffer_mutex);
+ 
+ 	/* Set a timeout by which the reader must come claim the result */
+@@ -172,7 +169,7 @@ static int tpm_release(struct inode *ino
+ 	del_singleshot_timer_sync(&priv->user_read_timer);
+ 	flush_work(&priv->work);
+ 	file->private_data = NULL;
+-	atomic_set(&priv->data_pending, 0);
++	priv->data_pending = 0;
+ 	clear_bit(0, &priv->chip->is_open);
+ 	kfree(priv);
+ 	return 0;