git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 29 Jan 2021 10:41:51 +0000 (11:41 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 29 Jan 2021 10:41:51 +0000 (11:41 +0100)
added patches:
arm64-mm-use-single-quantity-to-represent-the-pa-to-va-translation.patch
dm-integrity-conditionally-disable-recalculate-feature.patch
fs-fix-lazytime-expiration-handling-in-__writeback_single_inode.patch
smb3.1.1-do-not-log-warning-message-if-server-doesn-t-populate-salt.patch
tools-factor-hostcc-hostld-hostar-definitions.patch
writeback-drop-i_dirty_time_expire.patch

queue-5.4/arm64-mm-use-single-quantity-to-represent-the-pa-to-va-translation.patch [new file with mode: 0644]
queue-5.4/dm-integrity-conditionally-disable-recalculate-feature.patch [new file with mode: 0644]
queue-5.4/fs-fix-lazytime-expiration-handling-in-__writeback_single_inode.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/smb3.1.1-do-not-log-warning-message-if-server-doesn-t-populate-salt.patch [new file with mode: 0644]
queue-5.4/tools-factor-hostcc-hostld-hostar-definitions.patch [new file with mode: 0644]
queue-5.4/writeback-drop-i_dirty_time_expire.patch [new file with mode: 0644]

diff --git a/queue-5.4/arm64-mm-use-single-quantity-to-represent-the-pa-to-va-translation.patch b/queue-5.4/arm64-mm-use-single-quantity-to-represent-the-pa-to-va-translation.patch
new file mode 100644
index 0000000..e68c2d2
--- /dev/null
@@ -0,0 +1,147 @@
+From 7bc1a0f9e1765830e945669c99c59c35cf9bca82 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Thu, 8 Oct 2020 17:35:59 +0200
+Subject: arm64: mm: use single quantity to represent the PA to VA translation
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 7bc1a0f9e1765830e945669c99c59c35cf9bca82 upstream.
+
+On arm64, the global variable memstart_addr represents the physical
+address of PAGE_OFFSET, and so physical to virtual translations or
+vice versa used to come down to simple additions or subtractions
+involving the values of PAGE_OFFSET and memstart_addr.
+
+When support for 52-bit virtual addressing was introduced, we had to
+deal with PAGE_OFFSET potentially being outside of the region that
+can be covered by the virtual range (as the 52-bit VA capable build
+needs to be able to run on systems that are only 48-bit VA capable),
+and for this reason, another translation was introduced, and recorded
+in the global variable physvirt_offset.
+
+However, if we go back to the original definition of memstart_addr,
+i.e., the physical address of PAGE_OFFSET, it turns out that there is
+no need for two separate translations: instead, we can simply subtract
+the size of the unaddressable VA space from memstart_addr to make the
+available physical memory appear in the 48-bit addressable VA region.
+
+This simplifies things, but also fixes a bug on KASLR builds, which
+may update memstart_addr later on in arm64_memblock_init(), but fails
+to update vmemmap and physvirt_offset accordingly.
+
+Fixes: 5383cc6efed1 ("arm64: mm: Introduce vabits_actual")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Steve Capper <steve.capper@arm.com>
+Link: https://lore.kernel.org/r/20201008153602.9467-2-ardb@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/memory.h  |    5 ++---
+ arch/arm64/include/asm/pgtable.h |    4 ++--
+ arch/arm64/mm/init.c             |   30 ++++++++++--------------------
+ 3 files changed, 14 insertions(+), 25 deletions(-)
+
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -178,7 +178,6 @@ extern u64                 vabits_actual;
+ #include <linux/bitops.h>
+ #include <linux/mmdebug.h>
+-extern s64                    physvirt_offset;
+ extern s64                    memstart_addr;
+ /* PHYS_OFFSET - the physical address of the start of memory. */
+ #define PHYS_OFFSET           ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+@@ -254,7 +253,7 @@ static inline const void *__tag_set(cons
+  */
+ #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
+-#define __lm_to_phys(addr)    (((addr) + physvirt_offset))
++#define __lm_to_phys(addr)    (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr)  ((addr) - kimage_voffset)
+ #define __virt_to_phys_nodebug(x) ({                                  \
+@@ -272,7 +271,7 @@ extern phys_addr_t __phys_addr_symbol(un
+ #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+ #endif /* CONFIG_DEBUG_VIRTUAL */
+-#define __phys_to_virt(x)     ((unsigned long)((x) - physvirt_offset))
++#define __phys_to_virt(x)     ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+ #define __phys_to_kimg(x)     ((unsigned long)((x) + kimage_voffset))
+ /*
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,8 @@
+ #define VMALLOC_START         (MODULES_END)
+ #define VMALLOC_END           (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
++#define vmemmap                       ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
++
+ #define FIRST_USER_ADDRESS    0UL
+ #ifndef __ASSEMBLY__
+@@ -33,8 +35,6 @@
+ #include <linux/mm_types.h>
+ #include <linux/sched.h>
+-extern struct page *vmemmap;
+-
+ extern void __pte_error(const char *file, int line, unsigned long val);
+ extern void __pmd_error(const char *file, int line, unsigned long val);
+ extern void __pud_error(const char *file, int line, unsigned long val);
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -50,12 +50,6 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
+-s64 physvirt_offset __ro_after_init;
+-EXPORT_SYMBOL(physvirt_offset);
+-
+-struct page *vmemmap __ro_after_init;
+-EXPORT_SYMBOL(vmemmap);
+-
+ phys_addr_t arm64_dma_phys_limit __ro_after_init;
+ #ifdef CONFIG_KEXEC_CORE
+@@ -321,20 +315,6 @@ void __init arm64_memblock_init(void)
+       memstart_addr = round_down(memblock_start_of_DRAM(),
+                                  ARM64_MEMSTART_ALIGN);
+-      physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+-
+-      vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+-
+-      /*
+-       * If we are running with a 52-bit kernel VA config on a system that
+-       * does not support it, we have to offset our vmemmap and physvirt_offset
+-       * s.t. we avoid the 52-bit portion of the direct linear map
+-       */
+-      if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+-              vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+-              physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+-      }
+-
+       /*
+        * Remove the memory that we will not be able to cover with the
+        * linear mapping. Take care not to clip the kernel which may be
+@@ -350,6 +330,16 @@ void __init arm64_memblock_init(void)
+       }
+       /*
++       * If we are running with a 52-bit kernel VA config on a system that
++       * does not support it, we have to place the available physical
++       * memory in the 48-bit addressable part of the linear region, i.e.,
++       * we have to move it upward. Since memstart_addr represents the
++       * physical address of PAGE_OFFSET, we have to *subtract* from it.
++       */
++      if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
++              memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
++
++      /*
+        * Apply the memory limit if it was set. Since the kernel may be loaded
+        * high up in memory, add back the kernel region that must be accessible
+        * via the linear mapping.
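
As an illustrative aside, the single-offset arithmetic described in the changelog above can be sanity-checked with a small stand-alone C program. The constants below are made-up example values and the modelled macros only approximate the kernel's definitions; this is a sketch, not the patch itself.

/* Model of the single-quantity PA<->VA translation with example values. */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_OFFSET(va) (-(UINT64_C(1) << (va)))
#define PAGE_OFFSET      _PAGE_OFFSET(52)            /* 52-bit VA capable build */

int main(void)
{
	uint64_t memstart_addr = 0x80000000;         /* pretend DRAM starts at 2 GiB  */
	unsigned vabits_actual = 48;                 /* CPU only supports 48 VA bits  */

	/* The fix: fold the 48/52-bit adjustment into memstart_addr itself. */
	if (vabits_actual != 52)
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	uint64_t phys = 0x80001000;                                  /* some RAM page    */
	uint64_t virt = (phys - memstart_addr) | PAGE_OFFSET;        /* __phys_to_virt() */
	uint64_t back = (virt & ~PAGE_OFFSET) + memstart_addr;       /* __lm_to_phys()   */

	printf("phys %#llx -> virt %#llx -> phys %#llx\n",
	       (unsigned long long)phys, (unsigned long long)virt,
	       (unsigned long long)back);
	return 0;
}

With these example values the physical page at 0x80001000 lands at virtual address 0xffff000000001000, i.e. inside the 48-bit addressable part of the linear region, and the reverse translation recovers the original physical address using only memstart_addr and PAGE_OFFSET, with no separate physvirt_offset.
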
diff --git a/queue-5.4/dm-integrity-conditionally-disable-recalculate-feature.patch b/queue-5.4/dm-integrity-conditionally-disable-recalculate-feature.patch
new file mode 100644
index 0000000..5103cb6
--- /dev/null
@@ -0,0 +1,124 @@
+From 5c02406428d5219c367c5f53457698c58bc5f917 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 20 Jan 2021 13:59:11 -0500
+Subject: dm integrity: conditionally disable "recalculate" feature
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 5c02406428d5219c367c5f53457698c58bc5f917 upstream.
+
+Otherwise a malicious user could (ab)use the "recalculate" feature
+that makes dm-integrity calculate the checksums in the background
+while the device is already usable. When the system restarts before all
+checksums have been calculated, the calculation continues where it was
+interrupted even if the recalculate feature is not requested the next
+time the dm device is set up.
+
+Disable recalculating if we use internal_hash or journal_mac with a
+key (e.g. HMAC) and we don't have the "legacy_recalculate" flag.
+
+This may break activation of a volume, created by an older kernel,
+that is not yet fully recalculated -- if this happens, the user should
+add the "legacy_recalculate" flag to constructor parameters.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reported-by: Daniel Glockner <dg@emlix.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/admin-guide/device-mapper/dm-integrity.rst |    6 +++
+ drivers/md/dm-integrity.c                                |   24 ++++++++++++++-
+ 2 files changed, 29 insertions(+), 1 deletion(-)
+
+--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
++++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
+@@ -177,6 +177,12 @@ bitmap_flush_interval:number
+       The bitmap flush interval in milliseconds. The metadata buffers
+       are synchronized when this interval expires.
++legacy_recalculate
++      Allow recalculating of volumes with HMAC keys. This is disabled by
++      default for security reasons - an attacker could modify the volume,
++      set recalc_sector to zero, and the kernel would not detect the
++      modification.
++
+ The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
+ be changed when reloading the target (load an inactive table and swap the
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -254,6 +254,7 @@ struct dm_integrity_c {
+       bool journal_uptodate;
+       bool just_formatted;
+       bool recalculate_flag;
++      bool legacy_recalculate;
+       struct alg_spec internal_hash_alg;
+       struct alg_spec journal_crypt_alg;
+@@ -381,6 +382,14 @@ static int dm_integrity_failed(struct dm
+       return READ_ONCE(ic->failed);
+ }
++static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
++{
++      if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
++          !ic->legacy_recalculate)
++              return true;
++      return false;
++}
++
+ static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
+                                         unsigned j, unsigned char seq)
+ {
+@@ -2998,6 +3007,7 @@ static void dm_integrity_status(struct d
+               arg_count += !!ic->internal_hash_alg.alg_string;
+               arg_count += !!ic->journal_crypt_alg.alg_string;
+               arg_count += !!ic->journal_mac_alg.alg_string;
++              arg_count += ic->legacy_recalculate;
+               DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
+                      ic->tag_size, ic->mode, arg_count);
+               if (ic->meta_dev)
+@@ -3017,6 +3027,8 @@ static void dm_integrity_status(struct d
+                       DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+                       DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
+               }
++              if (ic->legacy_recalculate)
++                      DMEMIT(" legacy_recalculate");
+ #define EMIT_ALG(a, n)                                                        \
+               do {                                                    \
+@@ -3625,7 +3637,7 @@ static int dm_integrity_ctr(struct dm_ta
+       unsigned extra_args;
+       struct dm_arg_set as;
+       static const struct dm_arg _args[] = {
+-              {0, 15, "Invalid number of feature args"},
++              {0, 14, "Invalid number of feature args"},
+       };
+       unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+       bool should_write_sb;
+@@ -3769,6 +3781,8 @@ static int dm_integrity_ctr(struct dm_ta
+                               goto bad;
+               } else if (!strcmp(opt_string, "recalculate")) {
+                       ic->recalculate_flag = true;
++              } else if (!strcmp(opt_string, "legacy_recalculate")) {
++                      ic->legacy_recalculate = true;
+               } else {
+                       r = -EINVAL;
+                       ti->error = "Invalid argument";
+@@ -4067,6 +4081,14 @@ try_smaller_buffer:
+               }
+       }
++      if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
++          le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
++          dm_integrity_disable_recalculate(ic)) {
++              ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
++              r = -EOPNOTSUPP;
++              goto bad;
++      }
++
+       ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+                       1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+       if (IS_ERR(ic->bufio)) {
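
As an illustrative aside, the two hunks above combine into a single activation-time decision: the target now refuses to resume a partially recalculated volume when a keyed (HMAC) hash is in use and "legacy_recalculate" was not given. A minimal stand-alone sketch of that decision follows; the function and parameter names are made up for illustration and are not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Keyed internal_hash or journal_mac without the override disables recalculation. */
static bool disable_recalculate(bool internal_hash_keyed, bool journal_mac_keyed,
				bool legacy_recalculate)
{
	return (internal_hash_keyed || journal_mac_keyed) && !legacy_recalculate;
}

/* Activation fails only when an unfinished recalculation meets that policy. */
static bool refuse_activation(bool sb_recalculating,
			      unsigned long long recalc_sector,
			      unsigned long long provided_data_sectors,
			      bool internal_hash_keyed, bool journal_mac_keyed,
			      bool legacy_recalculate)
{
	return sb_recalculating &&
	       recalc_sector < provided_data_sectors &&
	       disable_recalculate(internal_hash_keyed, journal_mac_keyed,
				   legacy_recalculate);
}

int main(void)
{
	/* Half-recalculated HMAC volume, no override: refused. */
	printf("%d\n", refuse_activation(true, 1000, 2000, true, false, false));
	/* Same volume set up with "legacy_recalculate": allowed. */
	printf("%d\n", refuse_activation(true, 1000, 2000, true, false, true));
	/* Volume with an unkeyed (CRC-style) internal hash: unaffected. */
	printf("%d\n", refuse_activation(true, 1000, 2000, false, false, false));
	return 0;
}
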
diff --git a/queue-5.4/fs-fix-lazytime-expiration-handling-in-__writeback_single_inode.patch b/queue-5.4/fs-fix-lazytime-expiration-handling-in-__writeback_single_inode.patch
new file mode 100644 (file)
index 0000000..e96bc6c
--- /dev/null
@@ -0,0 +1,113 @@
+From 1e249cb5b7fc09ff216aa5a12f6c302e434e88f9 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 12 Jan 2021 11:02:43 -0800
+Subject: fs: fix lazytime expiration handling in __writeback_single_inode()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 1e249cb5b7fc09ff216aa5a12f6c302e434e88f9 upstream.
+
+When lazytime is enabled and an inode is being written due to its
+in-memory updated timestamps having expired, either due to a sync() or
+syncfs() system call or due to dirtytime_expire_interval having elapsed,
+the VFS needs to inform the filesystem so that the filesystem can copy
+the inode's timestamps out to the on-disk data structures.
+
+This is done by __writeback_single_inode() calling
+mark_inode_dirty_sync(), which then calls ->dirty_inode(I_DIRTY_SYNC).
+
+However, this occurs after __writeback_single_inode() has already
+cleared the dirty flags from ->i_state.  This causes two bugs:
+
+- mark_inode_dirty_sync() redirties the inode, causing it to remain
+  dirty.  This wastefully causes the inode to be written twice.  But
+  more importantly, it breaks cases where sync_filesystem() is expected
+  to clean dirty inodes.  This includes the FS_IOC_REMOVE_ENCRYPTION_KEY
+  ioctl (as reported at
+  https://lore.kernel.org/r/20200306004555.GB225345@gmail.com), as well
+  as possibly filesystem freezing (freeze_super()).
+
+- Since ->i_state doesn't contain I_DIRTY_TIME when ->dirty_inode() is
+  called from __writeback_single_inode() for lazytime expiration,
+  xfs_fs_dirty_inode() ignores the notification.  (XFS only cares about
+  lazytime expirations, and it assumes that i_state will contain
+  I_DIRTY_TIME during those.)  Therefore, lazy timestamps aren't
+  persisted by sync(), syncfs(), or dirtytime_expire_interval on XFS.
+
+Fix this by moving the call to mark_inode_dirty_sync() to earlier in
+__writeback_single_inode(), before the dirty flags are cleared from
+i_state.  This makes filesystems be properly notified of the timestamp
+expiration, and it avoids incorrectly redirtying the inode.
+
+This fixes xfstest generic/580 (which tests
+FS_IOC_REMOVE_ENCRYPTION_KEY) when run on ext4 or f2fs with lazytime
+enabled.  It also fixes the new lazytime xfstest I've proposed, which
+reproduces the above-mentioned XFS bug
+(https://lore.kernel.org/r/20210105005818.92978-1-ebiggers@kernel.org).
+
+Alternatively, we could call ->dirty_inode(I_DIRTY_SYNC) directly.  But
+due to the introduction of I_SYNC_QUEUED, mark_inode_dirty_sync() is the
+right thing to do because mark_inode_dirty_sync() now knows not to move
+the inode to a writeback list if it is currently queued for sync.
+
+Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option")
+Cc: stable@vger.kernel.org
+Depends-on: 5afced3bf281 ("writeback: Avoid skipping inode writeback")
+Link: https://lore.kernel.org/r/20210112190253.64307-2-ebiggers@kernel.org
+Suggested-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fs-writeback.c |   24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *i
+       }
+       /*
+-       * Some filesystems may redirty the inode during the writeback
+-       * due to delalloc, clear dirty metadata flags right before
+-       * write_inode()
++       * If the inode has dirty timestamps and we need to write them, call
++       * mark_inode_dirty_sync() to notify the filesystem about it and to
++       * change I_DIRTY_TIME into I_DIRTY_SYNC.
+        */
+-      spin_lock(&inode->i_lock);
+-
+-      dirty = inode->i_state & I_DIRTY;
+       if ((inode->i_state & I_DIRTY_TIME) &&
+-          ((dirty & I_DIRTY_INODE) ||
+-           wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
++          (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+            time_after(jiffies, inode->dirtied_time_when +
+                       dirtytime_expire_interval * HZ))) {
+-              dirty |= I_DIRTY_TIME;
+               trace_writeback_lazytime(inode);
++              mark_inode_dirty_sync(inode);
+       }
++
++      /*
++       * Some filesystems may redirty the inode during the writeback
++       * due to delalloc, clear dirty metadata flags right before
++       * write_inode()
++       */
++      spin_lock(&inode->i_lock);
++      dirty = inode->i_state & I_DIRTY;
+       inode->i_state &= ~dirty;
+       /*
+@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *i
+       spin_unlock(&inode->i_lock);
+-      if (dirty & I_DIRTY_TIME)
+-              mark_inode_dirty_sync(inode);
+       /* Don't write the inode if only I_DIRTY_PAGES was set */
+       if (dirty & ~I_DIRTY_PAGES) {
+               int err = write_inode(inode, wbc);
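
As an illustrative aside, the redirtying problem described in the changelog can be reproduced with a toy model of i_state: in the old ordering the filesystem is notified only after the dirty bits have been cleared, so the notification dirties the inode again, while in the new ordering the I_DIRTY_TIME to I_DIRTY_SYNC conversion happens first and is then cleared along with the other dirty bits. The flag values and helpers below are simplified stand-ins, not the kernel's.

#include <stdio.h>

#define I_DIRTY_SYNC  (1u << 0)
#define I_DIRTY_PAGES (1u << 2)
#define I_DIRTY_TIME  (1u << 11)
#define I_DIRTY       (I_DIRTY_SYNC | I_DIRTY_PAGES)

/* Simplified mark_inode_dirty_sync(): notify the fs, turn lazytime into a real dirty bit. */
static void notify_and_redirty(unsigned *i_state)
{
	*i_state &= ~I_DIRTY_TIME;
	*i_state |= I_DIRTY_SYNC;
}

/* Model of __writeback_single_inode() for an inode whose timestamps have expired. */
static unsigned writeback_expired_lazytime(unsigned i_state, int fixed_ordering)
{
	if (fixed_ordering && (i_state & I_DIRTY_TIME))
		notify_and_redirty(&i_state);          /* new code: notify before clearing */

	unsigned dirty = i_state & I_DIRTY;
	if (!fixed_ordering && (i_state & I_DIRTY_TIME))
		dirty |= I_DIRTY_TIME;                 /* old code queued the lazytime flag... */
	i_state &= ~dirty;

	if (!fixed_ordering && (dirty & I_DIRTY_TIME))
		notify_and_redirty(&i_state);          /* ...then re-dirtied the inode here */

	/* write_inode() would run at this point; whatever is left in i_state stays dirty. */
	return i_state;
}

int main(void)
{
	printf("old ordering: i_state after writeback = %#x (dirty again)\n",
	       writeback_expired_lazytime(I_DIRTY_TIME, 0));
	printf("new ordering: i_state after writeback = %#x (clean)\n",
	       writeback_expired_lazytime(I_DIRTY_TIME, 1));
	return 0;
}
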
diff --git a/queue-5.4/series b/queue-5.4/series
index 008360ee7cdc5be6a87b7c577fbf6cda899b0106..1f94b0ac2caf76c3d292ab127fb4cade11e81654 100644
@@ -10,3 +10,9 @@ futex_Handle_faults_correctly_for_PI_futexes.patch
 hid-wacom-correct-null-dereference-on-aes-pen-proximity.patch
 io_uring-fix-current-fs-handling-in-io_sq_wq_submit_work.patch
 tracing-fix-race-in-trace_open-and-buffer-resize-call.patch
+arm64-mm-use-single-quantity-to-represent-the-pa-to-va-translation.patch
+smb3.1.1-do-not-log-warning-message-if-server-doesn-t-populate-salt.patch
+tools-factor-hostcc-hostld-hostar-definitions.patch
+dm-integrity-conditionally-disable-recalculate-feature.patch
+writeback-drop-i_dirty_time_expire.patch
+fs-fix-lazytime-expiration-handling-in-__writeback_single_inode.patch
diff --git a/queue-5.4/smb3.1.1-do-not-log-warning-message-if-server-doesn-t-populate-salt.patch b/queue-5.4/smb3.1.1-do-not-log-warning-message-if-server-doesn-t-populate-salt.patch
new file mode 100644 (file)
index 0000000..942688d
--- /dev/null
@@ -0,0 +1,87 @@
+From 7955f105afb6034af344038d663bc98809483cdd Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Wed, 9 Dec 2020 22:19:00 -0600
+Subject: SMB3.1.1: do not log warning message if server doesn't populate salt
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 7955f105afb6034af344038d663bc98809483cdd upstream.
+
+In the negotiate protocol preauth context, the server is not required
+to populate the salt (although it is done by most servers) so do
+not warn on mount.
+
+We retain the checks (warn) that the preauth context is the minimum
+size and that the salt does not exceed DataLength of the SMB response.
+Although we use the defaults in the case that the preauth context
+response is invalid, these checks may be useful in the future
+as servers add support for additional mechanisms.
+
+CC: Stable <stable@vger.kernel.org>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c |    7 +++++--
+ fs/cifs/smb2pdu.h |   14 +++++++++++---
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -490,8 +490,8 @@ build_preauth_ctxt(struct smb2_preauth_n
+       pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
+       pneg_ctxt->DataLength = cpu_to_le16(38);
+       pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
+-      pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
+-      get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
++      pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
++      get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
+       pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
+ }
+@@ -617,6 +617,9 @@ static void decode_preauth_context(struc
+       if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
+               printk_once(KERN_WARNING "server sent bad preauth context\n");
+               return;
++      } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
++              pr_warn_once("server sent invalid SaltLength\n");
++              return;
+       }
+       if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
+               printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -271,12 +271,20 @@ struct smb2_neg_context {
+       /* Followed by array of data */
+ } __packed;
+-#define SMB311_SALT_SIZE                      32
++#define SMB311_LINUX_CLIENT_SALT_SIZE                 32
+ /* Hash Algorithm Types */
+ #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
+ #define SMB2_PREAUTH_HASH_SIZE 64
+-#define MIN_PREAUTH_CTXT_DATA_LEN     (SMB311_SALT_SIZE + 6)
++/*
++ * SaltLength that the server send can be zero, so the only three required
++ * fields (all __le16) end up six bytes total, so the minimum context data len
++ * in the response is six bytes which accounts for
++ *
++ *      HashAlgorithmCount, SaltLength, and 1 HashAlgorithm.
++ */
++#define MIN_PREAUTH_CTXT_DATA_LEN 6
++
+ struct smb2_preauth_neg_context {
+       __le16  ContextType; /* 1 */
+       __le16  DataLength;
+@@ -284,7 +292,7 @@ struct smb2_preauth_neg_context {
+       __le16  HashAlgorithmCount; /* 1 */
+       __le16  SaltLength;
+       __le16  HashAlgorithms; /* HashAlgorithms[0] since only one defined */
+-      __u8    Salt[SMB311_SALT_SIZE];
++      __u8    Salt[SMB311_LINUX_CLIENT_SALT_SIZE];
+ } __packed;
+ /* Encryption Algorithms Ciphers */
diff --git a/queue-5.4/tools-factor-hostcc-hostld-hostar-definitions.patch b/queue-5.4/tools-factor-hostcc-hostld-hostar-definitions.patch
new file mode 100644 (file)
index 0000000..4fb2b62
--- /dev/null
@@ -0,0 +1,101 @@
+From c8a950d0d3b926a02c7b2e713850d38217cec3d1 Mon Sep 17 00:00:00 2001
+From: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Date: Tue, 10 Nov 2020 17:43:05 +0100
+Subject: tools: Factor HOSTCC, HOSTLD, HOSTAR definitions
+
+From: Jean-Philippe Brucker <jean-philippe@linaro.org>
+
+commit c8a950d0d3b926a02c7b2e713850d38217cec3d1 upstream.
+
+Several Makefiles in tools/ need to define the host toolchain variables.
+Move their definition to tools/scripts/Makefile.include
+
+Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Link: https://lore.kernel.org/bpf/20201110164310.2600671-2-jean-philippe@linaro.org
+Cc: Alistair Delva <adelva@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/build/Makefile             |    4 ----
+ tools/objtool/Makefile           |    9 ---------
+ tools/perf/Makefile.perf         |    4 ----
+ tools/power/acpi/Makefile.config |    1 -
+ tools/scripts/Makefile.include   |   10 ++++++++++
+ 5 files changed, 10 insertions(+), 18 deletions(-)
+
+--- a/tools/build/Makefile
++++ b/tools/build/Makefile
+@@ -15,10 +15,6 @@ endef
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-HOSTAR ?= ar
+-
+ export HOSTCC HOSTLD HOSTAR
+ ifeq ($(V),1)
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -3,15 +3,6 @@ include ../scripts/Makefile.include
+ include ../scripts/Makefile.arch
+ # always use the host compiler
+-ifneq ($(LLVM),)
+-HOSTAR        ?= llvm-ar
+-HOSTCC        ?= clang
+-HOSTLD        ?= ld.lld
+-else
+-HOSTAR        ?= ar
+-HOSTCC        ?= gcc
+-HOSTLD        ?= ld
+-endif
+ AR     = $(HOSTAR)
+ CC     = $(HOSTCC)
+ LD     = $(HOSTLD)
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -163,10 +163,6 @@ endef
+ LD += $(EXTRA_LDFLAGS)
+-HOSTCC  ?= gcc
+-HOSTLD  ?= ld
+-HOSTAR  ?= ar
+-
+ PKG_CONFIG = $(CROSS_COMPILE)pkg-config
+ LLVM_CONFIG ?= llvm-config
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+ CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+ CROSS_COMPILE ?= $(CROSS)
+ LD = $(CC)
+-HOSTCC = gcc
+ # check if compiler option is supported
+ cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
++ifneq ($(LLVM),)
++HOSTAR  ?= llvm-ar
++HOSTCC  ?= clang
++HOSTLD  ?= ld.lld
++else
++HOSTAR  ?= ar
++HOSTCC  ?= gcc
++HOSTLD  ?= ld
++endif
++
+ ifeq ($(CC_NO_CLANG), 1)
+ EXTRA_WARNINGS += -Wstrict-aliasing=3
+ endif
diff --git a/queue-5.4/writeback-drop-i_dirty_time_expire.patch b/queue-5.4/writeback-drop-i_dirty_time_expire.patch
new file mode 100644 (file)
index 0000000..3febda4
--- /dev/null
@@ -0,0 +1,135 @@
+From 5fcd57505c002efc5823a7355e21f48dd02d5a51 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 29 May 2020 16:24:43 +0200
+Subject: writeback: Drop I_DIRTY_TIME_EXPIRE
+
+From: Jan Kara <jack@suse.cz>
+
+commit 5fcd57505c002efc5823a7355e21f48dd02d5a51 upstream.
+
+The only use of I_DIRTY_TIME_EXPIRE is to detect in
+__writeback_single_inode() that the inode got there because the flush
+worker decided it was time to write back the inode's dirty timestamps
+(either because we are syncing or because of age). However, we can
+detect this directly in __writeback_single_inode(), so there is no need
+for the strange propagation through the I_DIRTY_TIME_EXPIRE flag.
+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Cc: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c                  |    2 +-
+ fs/fs-writeback.c                |   28 +++++++++++-----------------
+ fs/xfs/libxfs/xfs_trans_inode.c  |    4 ++--
+ include/linux/fs.h               |    1 -
+ include/trace/events/writeback.h |    1 -
+ 5 files changed, 14 insertions(+), 22 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5209,7 +5209,7 @@ static int other_inode_match(struct inod
+           (inode->i_state & I_DIRTY_TIME)) {
+               struct ext4_inode_info  *ei = EXT4_I(inode);
+-              inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
++              inode->i_state &= ~I_DIRTY_TIME;
+               spin_unlock(&inode->i_lock);
+               spin_lock(&ei->i_raw_lock);
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1238,7 +1238,7 @@ static bool inode_dirtied_after(struct i
+  */
+ static int move_expired_inodes(struct list_head *delaying_queue,
+                              struct list_head *dispatch_queue,
+-                             int flags, unsigned long dirtied_before)
++                             unsigned long dirtied_before)
+ {
+       LIST_HEAD(tmp);
+       struct list_head *pos, *node;
+@@ -1254,8 +1254,6 @@ static int move_expired_inodes(struct li
+               list_move(&inode->i_io_list, &tmp);
+               moved++;
+               spin_lock(&inode->i_lock);
+-              if (flags & EXPIRE_DIRTY_ATIME)
+-                      inode->i_state |= I_DIRTY_TIME_EXPIRED;
+               inode->i_state |= I_SYNC_QUEUED;
+               spin_unlock(&inode->i_lock);
+               if (sb_is_blkdev_sb(inode->i_sb))
+@@ -1303,11 +1301,11 @@ static void queue_io(struct bdi_writebac
+       assert_spin_locked(&wb->list_lock);
+       list_splice_init(&wb->b_more_io, &wb->b_io);
+-      moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
++      moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
+       if (!work->for_sync)
+               time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
+       moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
+-                                   EXPIRE_DIRTY_ATIME, time_expire_jif);
++                                   time_expire_jif);
+       if (moved)
+               wb_io_lists_populated(wb);
+       trace_writeback_queue_io(wb, work, dirtied_before, moved);
+@@ -1483,18 +1481,14 @@ __writeback_single_inode(struct inode *i
+       spin_lock(&inode->i_lock);
+       dirty = inode->i_state & I_DIRTY;
+-      if (inode->i_state & I_DIRTY_TIME) {
+-              if ((dirty & I_DIRTY_INODE) ||
+-                  wbc->sync_mode == WB_SYNC_ALL ||
+-                  unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
+-                  unlikely(time_after(jiffies,
+-                                      (inode->dirtied_time_when +
+-                                       dirtytime_expire_interval * HZ)))) {
+-                      dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
+-                      trace_writeback_lazytime(inode);
+-              }
+-      } else
+-              inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
++      if ((inode->i_state & I_DIRTY_TIME) &&
++          ((dirty & I_DIRTY_INODE) ||
++           wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
++           time_after(jiffies, inode->dirtied_time_when +
++                      dirtytime_expire_interval * HZ))) {
++              dirty |= I_DIRTY_TIME;
++              trace_writeback_lazytime(inode);
++      }
+       inode->i_state &= ~dirty;
+       /*
+--- a/fs/xfs/libxfs/xfs_trans_inode.c
++++ b/fs/xfs/libxfs/xfs_trans_inode.c
+@@ -100,9 +100,9 @@ xfs_trans_log_inode(
+        * to log the timestamps, or will clear already cleared fields in the
+        * worst case.
+        */
+-      if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
++      if (inode->i_state & I_DIRTY_TIME) {
+               spin_lock(&inode->i_lock);
+-              inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
++              inode->i_state &= ~I_DIRTY_TIME;
+               spin_unlock(&inode->i_lock);
+       }
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2161,7 +2161,6 @@ static inline void init_sync_kiocb(struc
+ #define I_DIO_WAKEUP          (1 << __I_DIO_WAKEUP)
+ #define I_LINKABLE            (1 << 10)
+ #define I_DIRTY_TIME          (1 << 11)
+-#define I_DIRTY_TIME_EXPIRED  (1 << 12)
+ #define I_WB_SWITCH           (1 << 13)
+ #define I_OVL_INUSE           (1 << 14)
+ #define I_CREATING            (1 << 15)
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -20,7 +20,6 @@
+               {I_CLEAR,               "I_CLEAR"},             \
+               {I_SYNC,                "I_SYNC"},              \
+               {I_DIRTY_TIME,          "I_DIRTY_TIME"},        \
+-              {I_DIRTY_TIME_EXPIRED,  "I_DIRTY_TIME_EXPIRED"}, \
+               {I_REFERENCED,          "I_REFERENCED"}         \
+       )