--- /dev/null
+From 0c4395fb2aa77341269ea619c5419ea48171883f Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Tue, 14 Apr 2020 10:01:31 +0200
+Subject: evm: Fix possible memory leak in evm_calc_hmac_or_hash()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 0c4395fb2aa77341269ea619c5419ea48171883f upstream.
+
+Don't immediately return if the signature is portable and security.ima is
+not present. Just set error so that the allocated memory is freed before
+returning from evm_calc_hmac_or_hash().
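+
+The fix follows the usual kernel goto-cleanup idiom: record the error and
+fall through to the common exit label so that every path runs the kfree()
+calls. A minimal userspace sketch of the idiom (names and error values are
+illustrative, not the kernel code):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* On failure, set 'error' and fall through to the cleanup label;
+   * an early return in the middle would leak both buffers. */
+  static int compute(int fail)
+  {
+          char *a, *b;
+          int error = 0;
+
+          a = malloc(64);
+          b = malloc(64);
+          if (!a || !b) {
+                  error = -12;    /* -ENOMEM */
+                  goto out;
+          }
+          if (fail)
+                  error = -1;     /* -EPERM: set it, do not return */
+  out:
+          free(a);                /* free(NULL) is a no-op */
+          free(b);
+          return error;
+  }
+
+  int main(void)
+  {
+          printf("%d %d\n", compute(0), compute(1));
+          return 0;
+  }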
+
+Fixes: 50b977481fce9 ("EVM: Add support for portable signature format")
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/evm/evm_crypto.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -243,7 +243,7 @@ static int evm_calc_hmac_or_hash(struct
+
+ /* Portable EVM signatures must include an IMA hash */
+ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
+- return -EPERM;
++ error = -EPERM;
+ out:
+ kfree(xattr_value);
+ kfree(desc);
--- /dev/null
+From 8418897f1bf87da0cb6936489d57a4320c32c0af Mon Sep 17 00:00:00 2001
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+Date: Thu, 23 Apr 2020 15:46:44 +0800
+Subject: ext4: fix error pointer dereference
+
+From: Jeffle Xu <jefflexu@linux.alibaba.com>
+
+commit 8418897f1bf87da0cb6936489d57a4320c32c0af upstream.
+
+Don't pass error pointers to brelse().
+
+Commit 7159a986b420 ("ext4: fix some error pointer dereferences") fixed
+some of these cases; fix the remaining one.
+
+When ext4_xattr_block_find()->ext4_sb_bread() fails, an error pointer is
+stored in @bs->bh and is later passed to brelse() in the cleanup
+routine of ext4_xattr_set_handle(). This then causes a NULL pointer
+dereference crash in __brelse().
+
+BUG: unable to handle kernel NULL pointer dereference at 000000000000005b
+RIP: 0010:__brelse+0x1b/0x50
+Call Trace:
+ ext4_xattr_set_handle+0x163/0x5d0
+ ext4_xattr_set+0x95/0x110
+ __vfs_setxattr+0x6b/0x80
+ __vfs_setxattr_noperm+0x68/0x1b0
+ vfs_setxattr+0xa0/0xb0
+ setxattr+0x12c/0x1a0
+ path_setxattr+0x8d/0xc0
+ __x64_sys_setxattr+0x27/0x30
+ do_syscall_64+0x60/0x250
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+In this case, @bs->bh actually stores '-EIO'.
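+
+For context, the kernel encodes small negative errno values directly in
+pointers returned by functions such as ext4_sb_bread(). A hedged userspace
+re-implementation of the err.h helpers shows why such a pointer must be
+replaced with NULL before the cleanup code can safely call brelse() (which
+ignores NULL):
+
+  #include <stdio.h>
+
+  #define MAX_ERRNO 4095
+
+  /* Userspace copies of the kernel's error-pointer helpers. */
+  static void *ERR_PTR(long error) { return (void *)error; }
+  static long PTR_ERR(const void *ptr) { return (long)ptr; }
+  static int IS_ERR(const void *ptr)
+  {
+          return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
+  }
+
+  int main(void)
+  {
+          void *bh = ERR_PTR(-5);         /* -EIO, as in the oops above */
+
+          if (IS_ERR(bh)) {
+                  printf("error pointer, errno=%ld\n", PTR_ERR(bh));
+                  bh = NULL;              /* what the fix does */
+          }
+          return 0;
+  }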
+
+Fixes: fb265c9cb49e ("ext4: add ext4_sb_bread() to disambiguate ENOMEM cases")
+Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: stable@kernel.org # 2.6.19
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/1587628004-95123-1-git-send-email-jefflexu@linux.alibaba.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/xattr.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1820,8 +1820,11 @@ ext4_xattr_block_find(struct inode *inod
+ if (EXT4_I(inode)->i_file_acl) {
+ /* The inode already has an extended attribute block. */
+ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+- if (IS_ERR(bs->bh))
+- return PTR_ERR(bs->bh);
++ if (IS_ERR(bs->bh)) {
++ error = PTR_ERR(bs->bh);
++ bs->bh = NULL;
++ return error;
++ }
+ ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
+ atomic_read(&(bs->bh->b_count)),
+ le32_to_cpu(BHDR(bs->bh)->h_refcount));
--- /dev/null
+From c36a71b4e35ab35340facdd6964a00956b9fef0a Mon Sep 17 00:00:00 2001
+From: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+Date: Mon, 20 Apr 2020 19:39:59 -0700
+Subject: ext4: fix EXT_MAX_EXTENT/INDEX to check for zeroed eh_max
+
+From: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+
+commit c36a71b4e35ab35340facdd6964a00956b9fef0a upstream.
+
+If eh->eh_max is 0, EXT_MAX_EXTENT/INDEX would evaluate to unsigned (-1),
+resulting in illegal memory accesses. Although there is no consistent
+reproducer, generic/019 sometimes crashes because of this bug.
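+
+To see the arithmetic: with eh_max == 0, the macro computes
+'first + 0 - 1', a pointer one element before the array, which iterating
+or dereferencing callers then use for out-of-bounds accesses. A hedged
+userspace sketch (plain ints stand in for the ext4 extent structures):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned short eh_max = 0;      /* zeroed on-disk header field */
+          int extents[4];
+          int *first = extents;
+          int *max = first + eh_max - 1;  /* points BEFORE the array;
+                                           * already undefined behavior */
+
+          printf("first=%p max=%p\n", (void *)first, (void *)max);
+          return 0;
+  }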
+
+Ran gce-xfstests smoke and verified that there were no regressions.
+
+Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+Link: https://lore.kernel.org/r/20200421023959.20879-2-harshadshirwadkar@gmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4_extents.h | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/ext4_extents.h
++++ b/fs/ext4/ext4_extents.h
+@@ -170,10 +170,13 @@ struct partial_cluster {
+ (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
+ #define EXT_LAST_INDEX(__hdr__) \
+ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
+-#define EXT_MAX_EXTENT(__hdr__) \
+- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++#define EXT_MAX_EXTENT(__hdr__) \
++ ((le16_to_cpu((__hdr__)->eh_max)) ? \
++ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \
++ : 0)
+ #define EXT_MAX_INDEX(__hdr__) \
+- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
++ ((le16_to_cpu((__hdr__)->eh_max)) ? \
++ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0)
+
+ static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
+ {
--- /dev/null
+From 08adf452e628b0e2ce9a01048cfbec52353703d7 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 6 May 2020 11:31:40 -0700
+Subject: ext4: fix race between ext4_sync_parent() and rename()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 08adf452e628b0e2ce9a01048cfbec52353703d7 upstream.
+
+'igrab(d_inode(dentry->d_parent))' without holding dentry->d_lock is
+broken because, without d_lock, d_parent can be concurrently changed by
+a rename(). Then, if the old directory is immediately deleted, the old
+d_parent's inode can be NULL, causing a NULL dereference in igrab().
+
+To fix this, use dget_parent() to safely grab a reference to the parent
+dentry, which pins the inode. This also eliminates the need to use
+d_find_any_alias() other than for the initial inode, as we no longer
+throw away the dentry at each step.
+
+This is an extremely hard race to hit, but it is possible. Adding a
+udelay() in between the reads of ->d_parent and its ->d_inode makes it
+reproducible on a no-journal filesystem using the following program:
+
+ #include <fcntl.h>
+ #include <unistd.h>
+
+ int main()
+ {
+ if (fork()) {
+ for (;;) {
+ mkdir("dir1", 0700);
+ int fd = open("dir1/file", O_RDWR|O_CREAT|O_SYNC);
+ write(fd, "X", 1);
+ close(fd);
+ }
+ } else {
+ mkdir("dir2", 0700);
+ for (;;) {
+ rename("dir1/file", "dir2/file");
+ rmdir("dir1");
+ }
+ }
+ }
+
+Fixes: d59729f4e794 ("ext4: fix races in ext4_sync_parent()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Link: https://lore.kernel.org/r/20200506183140.541194-1-ebiggers@kernel.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/fsync.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -44,30 +44,28 @@
+ */
+ static int ext4_sync_parent(struct inode *inode)
+ {
+- struct dentry *dentry = NULL;
+- struct inode *next;
++ struct dentry *dentry, *next;
+ int ret = 0;
+
+ if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
+ return 0;
+- inode = igrab(inode);
++ dentry = d_find_any_alias(inode);
++ if (!dentry)
++ return 0;
+ while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
+ ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
+- dentry = d_find_any_alias(inode);
+- if (!dentry)
+- break;
+- next = igrab(d_inode(dentry->d_parent));
++
++ next = dget_parent(dentry);
+ dput(dentry);
+- if (!next)
+- break;
+- iput(inode);
+- inode = next;
++ dentry = next;
++ inode = dentry->d_inode;
++
+ /*
+ * The directory inode may have gone through rmdir by now. But
+ * the inode itself and its blocks are still allocated (we hold
+- * a reference to the inode so it didn't go through
+- * ext4_evict_inode()) and so we are safe to flush metadata
+- * blocks and the inode.
++ * a reference to the inode via its dentry), so it didn't go
++ * through ext4_evict_inode()) and so we are safe to flush
++ * metadata blocks and the inode.
+ */
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (ret)
+@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode
+ if (ret)
+ break;
+ }
+- iput(inode);
++ dput(dentry);
+ return ret;
+ }
+
--- /dev/null
+From 6cc7c266e5b47d3cd2b5bb7fd3aac4e6bb2dd1d2 Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Wed, 3 Jun 2020 17:08:21 +0200
+Subject: ima: Call ima_calc_boot_aggregate() in ima_eventdigest_init()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 6cc7c266e5b47d3cd2b5bb7fd3aac4e6bb2dd1d2 upstream.
+
+If the template field 'd' is chosen and the digest to be added to the
+measurement entry was not calculated with SHA1 or MD5, it is recalculated
+with SHA1 by using the passed file descriptor. However, this cannot be
+done for boot_aggregate, because there is no file descriptor.
+
+This patch adds a call to ima_calc_boot_aggregate() in
+ima_eventdigest_init(), so that the digest can also be recalculated for
+the boot_aggregate entry.
+
+Cc: stable@vger.kernel.org # 3.13.x
+Fixes: 3ce1217d6cd5d ("ima: define template fields library and new helpers")
+Reported-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima.h | 3 ++-
+ security/integrity/ima/ima_crypto.c | 6 +++---
+ security/integrity/ima/ima_init.c | 2 +-
+ security/integrity/ima/ima_template_lib.c | 18 ++++++++++++++++++
+ 4 files changed, 24 insertions(+), 5 deletions(-)
+
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -52,6 +52,7 @@ extern int ima_policy_flag;
+ extern int ima_hash_algo;
+ extern int ima_appraise;
+ extern struct tpm_chip *ima_tpm_chip;
++extern const char boot_aggregate_name[];
+
+ /* IMA event related data */
+ struct ima_event_data {
+@@ -140,7 +141,7 @@ int ima_calc_buffer_hash(const void *buf
+ int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash);
+-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
++int ima_calc_boot_aggregate(struct ima_digest_data *hash);
+ void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct integrity_iint_cache *iint,
+ const char *op, const char *cause);
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -665,8 +665,8 @@ static void __init ima_pcrread(u32 idx,
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
+ */
+-static int __init ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+- struct crypto_shash *tfm)
++static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
++ struct crypto_shash *tfm)
+ {
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
+ int rc;
+@@ -694,7 +694,7 @@ static int __init ima_calc_boot_aggregat
+ return rc;
+ }
+
+-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
++int ima_calc_boot_aggregate(struct ima_digest_data *hash)
+ {
+ struct crypto_shash *tfm;
+ u16 crypto_id, alg_id;
+--- a/security/integrity/ima/ima_init.c
++++ b/security/integrity/ima/ima_init.c
+@@ -21,7 +21,7 @@
+ #include "ima.h"
+
+ /* name for boot aggregate entry */
+-static const char boot_aggregate_name[] = "boot_aggregate";
++const char boot_aggregate_name[] = "boot_aggregate";
+ struct tpm_chip *ima_tpm_chip;
+
+ /* Add the boot aggregate to the IMA measurement list and extend
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -288,6 +288,24 @@ int ima_eventdigest_init(struct ima_even
+ goto out;
+ }
+
++ if ((const char *)event_data->filename == boot_aggregate_name) {
++ if (ima_tpm_chip) {
++ hash.hdr.algo = HASH_ALGO_SHA1;
++ result = ima_calc_boot_aggregate(&hash.hdr);
++
++ /* algo can change depending on available PCR banks */
++ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
++ result = -EINVAL;
++
++ if (result < 0)
++ memset(&hash, 0, sizeof(hash));
++ }
++
++ cur_digest = hash.hdr.digest;
++ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
++ goto out;
++ }
++
+ if (!event_data->file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
--- /dev/null
+From 067a436b1b0aafa593344fddd711a755a58afb3b Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Wed, 3 Jun 2020 17:08:20 +0200
+Subject: ima: Directly assign the ima_default_policy pointer to ima_rules
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 067a436b1b0aafa593344fddd711a755a58afb3b upstream.
+
+This patch prevents the following oops:
+
+[ 10.771813] BUG: kernel NULL pointer dereference, address: 0000000000000
+[...]
+[ 10.779790] RIP: 0010:ima_match_policy+0xf7/0xb80
+[...]
+[ 10.798576] Call Trace:
+[ 10.798993] ? ima_lsm_policy_change+0x2b0/0x2b0
+[ 10.799753] ? inode_init_owner+0x1a0/0x1a0
+[ 10.800484] ? _raw_spin_lock+0x7a/0xd0
+[ 10.801592] ima_must_appraise.part.0+0xb6/0xf0
+[ 10.802313] ? ima_fix_xattr.isra.0+0xd0/0xd0
+[ 10.803167] ima_must_appraise+0x4f/0x70
+[ 10.804004] ima_post_path_mknod+0x2e/0x80
+[ 10.804800] do_mknodat+0x396/0x3c0
+
+It occurs when there is a failure during IMA initialization and
+ima_init_policy() is not called. IMA hooks still call ima_match_policy(),
+but ima_rules is NULL. This patch prevents the crash by directly assigning
+the ima_default_policy pointer to ima_rules at the point where ima_rules
+is defined. This doesn't alter the existing behavior, as ima_rules is
+always set at the end of ima_init_policy().
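+
+A minimal sketch of the pattern with a simplified rule list: initializing
+the pointer at its definition removes the window in which it can be
+observed as NULL (types and names are illustrative):
+
+  #include <stdio.h>
+
+  struct list_head { struct list_head *next, *prev; };
+
+  static struct list_head default_rules = { &default_rules, &default_rules };
+
+  /* Initialized at its definition, as in the patch, so callers that
+   * run before any explicit init step see a valid (empty) list. */
+  static struct list_head *rules = &default_rules;
+
+  int main(void)
+  {
+          printf("empty=%d\n", rules->next == rules);
+          return 0;
+  }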
+
+Cc: stable@vger.kernel.org # 3.7.x
+Fixes: 07f6a79415d7d ("ima: add appraise action keywords and default rules")
+Reported-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_policy.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_polic
+ static LIST_HEAD(ima_default_rules);
+ static LIST_HEAD(ima_policy_rules);
+ static LIST_HEAD(ima_temp_rules);
+-static struct list_head *ima_rules;
++static struct list_head *ima_rules = &ima_default_rules;
+
+ static int ima_policy __initdata;
+
+@@ -712,7 +712,6 @@ void __init ima_init_policy(void)
+ ARRAY_SIZE(default_appraise_rules),
+ IMA_DEFAULT_POLICY);
+
+- ima_rules = &ima_default_rules;
+ ima_update_policy_flag();
+ }
+
--- /dev/null
+From e144d6b265415ddbdc54b3f17f4f95133effa5a8 Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Wed, 25 Mar 2020 11:47:07 +0100
+Subject: ima: Evaluate error in init_ima()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit e144d6b265415ddbdc54b3f17f4f95133effa5a8 upstream.
+
+Evaluate error in init_ima() before register_blocking_lsm_notifier() and
+return if it is not zero.
+
+Cc: stable@vger.kernel.org # 5.3.x
+Fixes: b16942455193 ("ima: use the lsm policy update notifier")
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Reviewed-by: James Morris <jamorris@linux.microsoft.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -712,6 +712,9 @@ static int __init init_ima(void)
+ error = ima_init();
+ }
+
++ if (error)
++ return error;
++
+ error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
+ if (error)
+ pr_warn("Couldn't register LSM notifier, error %d\n", error);
--- /dev/null
+From 1129d31b55d509f15e72dc68e4b5c3a4d7b4da8d Mon Sep 17 00:00:00 2001
+From: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
+Date: Tue, 28 Apr 2020 09:30:10 +0200
+Subject: ima: Fix ima digest hash table key calculation
+
+From: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
+
+commit 1129d31b55d509f15e72dc68e4b5c3a4d7b4da8d upstream.
+
+Function hash_long() accepts an unsigned long, while currently only one
+byte is passed from ima_hash_key(), which calculates a key for ima_htable.
+
+Given that hashing the digest does not give clear benefits compared to
+using the digest itself, remove hash_long() and return the first two bytes
+of the digest modulo the number of slots. Also reduce the depth of the
+hash table by doubling the number of slots.
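+
+A userspace sketch of the new key computation (constants copied from the
+patch; the digest contents are illustrative):
+
+  #include <stdio.h>
+
+  #define IMA_HASH_BITS           10
+  #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+
+  static unsigned int ima_hash_key(const unsigned char *digest)
+  {
+          /* there is no point in taking a hash of part of a digest */
+          return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
+  }
+
+  int main(void)
+  {
+          unsigned char digest[20] = { 0xde, 0xad, 0xbe, 0xef };
+
+          /* key = 0xadde % 1024 */
+          printf("key=%u\n", ima_hash_key(digest));
+          return 0;
+  }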
+
+Cc: stable@vger.kernel.org
+Fixes: 3323eec921ef ("integrity: IMA as an integrity service provider")
+Co-developed-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
+Acked-by: David.Laight@aculab.com (big endian system concerns)
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima.h | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 =
+ #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
+ #define IMA_EVENT_NAME_LEN_MAX 255
+
+-#define IMA_HASH_BITS 9
++#define IMA_HASH_BITS 10
+ #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+
+ #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+@@ -175,9 +175,10 @@ struct ima_h_table {
+ };
+ extern struct ima_h_table ima_htable;
+
+-static inline unsigned long ima_hash_key(u8 *digest)
++static inline unsigned int ima_hash_key(u8 *digest)
+ {
+- return hash_long(*digest, IMA_HASH_BITS);
++ /* there is no point in taking a hash of part of a digest */
++ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
+ }
+
+ #define __ima_hooks(hook) \
--- /dev/null
+From 8b8c704d913b0fe490af370631a4200e26334ec0 Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Sun, 7 Jun 2020 23:00:29 +0200
+Subject: ima: Remove __init annotation from ima_pcrread()
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 8b8c704d913b0fe490af370631a4200e26334ec0 upstream.
+
+Commit 6cc7c266e5b4 ("ima: Call ima_calc_boot_aggregate() in
+ima_eventdigest_init()") added a call to ima_calc_boot_aggregate() so that
+the digest can be recalculated for the boot_aggregate measurement entry if
+the 'd' template field has been requested. For the 'd' field, only SHA1 and
+MD5 digests are accepted.
+
+Given that ima_eventdigest_init() does not have the __init annotation,
+none of the functions it calls should have it either. This patch removes
+__init from ima_pcrread().
+
+Cc: stable@vger.kernel.org
+Fixes: 6cc7c266e5b4 ("ima: Call ima_calc_boot_aggregate() in ima_eventdigest_init()")
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_crypto.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -645,7 +645,7 @@ int ima_calc_buffer_hash(const void *buf
+ return calc_buffer_shash(buf, len, hash);
+ }
+
+-static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
++static void ima_pcrread(u32 idx, struct tpm_digest *d)
+ {
+ if (!ima_tpm_chip)
+ return;
--- /dev/null
+From 6f1a1d103b48b1533a9c804e7a069e2c8e937ce7 Mon Sep 17 00:00:00 2001
+From: Roberto Sassu <roberto.sassu@huawei.com>
+Date: Wed, 25 Mar 2020 11:47:06 +0100
+Subject: ima: Switch to ima_hash_algo for boot aggregate
+
+From: Roberto Sassu <roberto.sassu@huawei.com>
+
+commit 6f1a1d103b48b1533a9c804e7a069e2c8e937ce7 upstream.
+
+boot_aggregate is the first entry of the IMA measurement list. Its purpose
+is to link pre-boot measurements to IMA measurements. As IMA was designed
+to work with a TPM 1.2, the SHA1 PCR bank was always selected, even if a
+TPM 2.0 with support for stronger hash algorithms was available.
+
+This patch first tries to find a PCR bank with the IMA default hash
+algorithm. If it does not find one, it selects the SHA256 PCR bank for
+TPM 2.0 and SHA1 for TPM 1.2. Ultimately, it selects SHA1 also for TPM 2.0
+if the SHA256 PCR bank is not found.
+
+If none of the PCR banks above can be found, the boot_aggregate file
+digest is filled with zeros, as for TPM bypass, making it impossible to
+perform a remote attestation of the system.
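+
+The bank selection amounts to a single pass over the allocated banks with
+a three-level preference: the IMA default algorithm, then SHA256, then
+SHA1. A hedged userspace sketch with mock bank data (enum values and the
+bank list are illustrative):
+
+  #include <stdio.h>
+
+  enum { ALGO_SHA1, ALGO_SHA256, ALGO_SHA512 };
+
+  /* Mock PCR banks as firmware might enable them. */
+  static const int banks[] = { ALGO_SHA1, ALGO_SHA256 };
+  static const int nr_banks = 2;
+
+  static int pick_bank(int default_algo)
+  {
+          int i, bank_idx = -1;
+
+          for (i = 0; i < nr_banks; i++) {
+                  if (banks[i] == default_algo)
+                          return i;       /* exact match wins */
+                  if (banks[i] == ALGO_SHA256)
+                          bank_idx = i;   /* TPM 2.0 fallback */
+                  if (bank_idx == -1 && banks[i] == ALGO_SHA1)
+                          bank_idx = i;   /* last-resort fallback */
+          }
+          return bank_idx;                /* -1: digest filled with zeros */
+  }
+
+  int main(void)
+  {
+          printf("bank for sha512 default: %d\n", pick_bank(ALGO_SHA512));
+          return 0;
+  }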
+
+Cc: stable@vger.kernel.org # 5.1.x
+Fixes: 879b589210a9 ("tpm: retrieve digest size of unknown algorithms with PCR read")
+Reported-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Suggested-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/integrity/ima/ima_crypto.c | 47 +++++++++++++++++++++++++++++++-----
+ security/integrity/ima/ima_init.c | 20 ++++++++++++---
+ 2 files changed, 57 insertions(+), 10 deletions(-)
+
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -655,18 +655,29 @@ static void __init ima_pcrread(u32 idx,
+ }
+
+ /*
+- * Calculate the boot aggregate hash
++ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
++ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
++ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
++ * allowing firmware to configure and enable different banks.
++ *
++ * Knowing which TPM bank is read to calculate the boot_aggregate digest
++ * needs to be conveyed to a verifier. For this reason, use the same
++ * hash algorithm for reading the TPM PCRs as for calculating the boot
++ * aggregate digest as stored in the measurement list.
+ */
+-static int __init ima_calc_boot_aggregate_tfm(char *digest,
++static int __init ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ struct crypto_shash *tfm)
+ {
+- struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
++ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
+ int rc;
+ u32 i;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
++ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
++ d.alg_id);
++
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+@@ -675,7 +686,8 @@ static int __init ima_calc_boot_aggregat
+ for (i = TPM_PCR0; i < TPM_PCR8; i++) {
+ ima_pcrread(i, &d);
+ /* now accumulate with current aggregate */
+- rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
++ rc = crypto_shash_update(shash, d.digest,
++ crypto_shash_digestsize(tfm));
+ }
+ if (!rc)
+ crypto_shash_final(shash, digest);
+@@ -685,14 +697,37 @@ static int __init ima_calc_boot_aggregat
+ int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+ {
+ struct crypto_shash *tfm;
+- int rc;
++ u16 crypto_id, alg_id;
++ int rc, i, bank_idx = -1;
++
++ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
++ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
++ if (crypto_id == hash->algo) {
++ bank_idx = i;
++ break;
++ }
++
++ if (crypto_id == HASH_ALGO_SHA256)
++ bank_idx = i;
++
++ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
++ bank_idx = i;
++ }
++
++ if (bank_idx == -1) {
++ pr_err("No suitable TPM algorithm for boot aggregate\n");
++ return 0;
++ }
++
++ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+- rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
++ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
++ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
+
+ ima_free_tfm(tfm);
+
+--- a/security/integrity/ima/ima_init.c
++++ b/security/integrity/ima/ima_init.c
+@@ -27,7 +27,7 @@ struct tpm_chip *ima_tpm_chip;
+ /* Add the boot aggregate to the IMA measurement list and extend
+ * the PCR register.
+ *
+- * Calculate the boot aggregate, a SHA1 over tpm registers 0-7,
++ * Calculate the boot aggregate, a hash over tpm registers 0-7,
+ * assuming a TPM chip exists, and zeroes if the TPM chip does not
+ * exist. Add the boot aggregate measurement to the measurement
+ * list and extend the PCR register.
+@@ -51,15 +51,27 @@ static int __init ima_add_boot_aggregate
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+- char digest[TPM_DIGEST_SIZE];
++ char digest[TPM_MAX_DIGEST_SIZE];
+ } hash;
+
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+- iint->ima_hash->algo = HASH_ALGO_SHA1;
+- iint->ima_hash->length = SHA1_DIGEST_SIZE;
++ iint->ima_hash->algo = ima_hash_algo;
++ iint->ima_hash->length = hash_digest_size[ima_hash_algo];
+
++ /*
++ * With TPM 2.0 hash agility, TPM chips could support multiple TPM
++ * PCR banks, allowing firmware to configure and enable different
++ * banks. The SHA1 bank is not necessarily enabled.
++ *
++ * Use the same hash algorithm for reading the TPM PCRs as for
++ * calculating the boot aggregate digest. Preference is given to
++ * the configured IMA default hash algorithm. Otherwise, use the
++ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
++ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
++ * is not found.
++ */
+ if (ima_tpm_chip) {
+ result = ima_calc_boot_aggregate(&hash.hdr);
+ if (result < 0) {
--- /dev/null
+From a202bf71f08b3ef15356db30535e30b03cf23aec Mon Sep 17 00:00:00 2001
+From: Lichao Liu <liulichao@loongson.cn>
+Date: Thu, 28 May 2020 09:10:31 +0800
+Subject: MIPS: CPU_LOONGSON2EF need software to maintain cache consistency
+
+From: Lichao Liu <liulichao@loongson.cn>
+
+commit a202bf71f08b3ef15356db30535e30b03cf23aec upstream.
+
+CPU_LOONGSON2EF needs software to maintain cache consistency, so modify
+the cpu_needs_post_dma_flush() function to return true when the cpu type
+is CPU_LOONGSON2EF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Lichao Liu <liulichao@loongson.cn>
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/dma-noncoherent.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/mm/dma-noncoherent.c
++++ b/arch/mips/mm/dma-noncoherent.c
+@@ -33,6 +33,7 @@ static inline bool cpu_needs_post_dma_fl
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
++ case CPU_LOONGSON2EF:
+ return true;
+ default:
+ /*
--- /dev/null
+From da97f2d56bbd880b4138916a7ef96f9881a551b2 Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+Date: Wed, 3 Jun 2020 15:59:27 -0700
+Subject: mm: call cond_resched() from deferred_init_memmap()
+
+From: Pavel Tatashin <pasha.tatashin@soleen.com>
+
+commit da97f2d56bbd880b4138916a7ef96f9881a551b2 upstream.
+
+Now that deferred pages are initialized with interrupts enabled, we can
+replace touch_nmi_watchdog() with cond_resched(), as it was before
+3a2d7fa8a3d5.
+
+For now, we cannot do the same in deferred_grow_zone(), as it still
+initializes pages with interrupts disabled.
+
+This change fixes the RCU problem described in
+https://lkml.kernel.org/r/20200401104156.11564-2-david@redhat.com
+
+[ 60.474005] rcu: INFO: rcu_sched detected stalls on CPUs/tasks:
+[ 60.475000] rcu: 1-...0: (0 ticks this GP) idle=02a/1/0x4000000000000000 softirq=1/1 fqs=15000
+[ 60.475000] rcu: (detected by 0, t=60002 jiffies, g=-1199, q=1)
+[ 60.475000] Sending NMI from CPU 0 to CPUs 1:
+[ 1.760091] NMI backtrace for cpu 1
+[ 1.760091] CPU: 1 PID: 20 Comm: pgdatinit0 Not tainted 4.18.0-147.9.1.el8_1.x86_64 #1
+[ 1.760091] Hardware name: Red Hat KVM, BIOS 1.13.0-1.module+el8.2.0+5520+4e5817f3 04/01/2014
+[ 1.760091] RIP: 0010:__init_single_page.isra.65+0x10/0x4f
+[ 1.760091] Code: 48 83 cf 63 48 89 f8 0f 1f 40 00 48 89 c6 48 89 d7 e8 6b 18 80 ff 66 90 5b c3 31 c0 b9 10 00 00 00 49 89 f8 48 c1 e6 33 f3 ab <b8> 07 00 00 00 48 c1 e2 36 41 c7 40 34 01 00 00 00 48 c1 e0 33 41
+[ 1.760091] RSP: 0000:ffffba783123be40 EFLAGS: 00000006
+[ 1.760091] RAX: 0000000000000000 RBX: fffffad34405e300 RCX: 0000000000000000
+[ 1.760091] RDX: 0000000000000000 RSI: 0010000000000000 RDI: fffffad34405e340
+[ 1.760091] RBP: 0000000033f3177e R08: fffffad34405e300 R09: 0000000000000002
+[ 1.760091] R10: 000000000000002b R11: ffff98afb691a500 R12: 0000000000000002
+[ 1.760091] R13: 0000000000000000 R14: 000000003f03ea00 R15: 000000003e10178c
+[ 1.760091] FS: 0000000000000000(0000) GS:ffff9c9ebeb00000(0000) knlGS:0000000000000000
+[ 1.760091] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 1.760091] CR2: 00000000ffffffff CR3: 000000a1cf20a001 CR4: 00000000003606e0
+[ 1.760091] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 1.760091] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 1.760091] Call Trace:
+[ 1.760091] deferred_init_pages+0x8f/0xbf
+[ 1.760091] deferred_init_memmap+0x184/0x29d
+[ 1.760091] ? deferred_free_pages.isra.97+0xba/0xba
+[ 1.760091] kthread+0x112/0x130
+[ 1.760091] ? kthread_flush_work_fn+0x10/0x10
+[ 1.760091] ret_from_fork+0x35/0x40
+[ 89.123011] node 0 initialised, 1055935372 pages in 88650ms
+
+Fixes: 3a2d7fa8a3d5 ("mm: disable interrupts while initializing deferred pages")
+Reported-by: Yiqian Wei <yiwei@redhat.com>
+Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Tested-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: James Morris <jmorris@namei.org>
+Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
+Cc: Sasha Levin <sashal@kernel.org>
+Cc: Shile Zhang <shile.zhang@linux.alibaba.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org> [4.17+]
+Link: http://lkml.kernel.org/r/20200403140952.17177-4-pasha.tatashin@soleen.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1817,7 +1817,7 @@ static int __init deferred_init_memmap(v
+ */
+ while (spfn < epfn) {
+ nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
+- touch_nmi_watchdog();
++ cond_resched();
+ }
+ zone_empty:
+ /* Sanity check that the next zone really is unpopulated */
--- /dev/null
+From 117003c32771df617acf66e140fbdbdeb0ac71f5 Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Wed, 3 Jun 2020 15:59:20 -0700
+Subject: mm/pagealloc.c: call touch_nmi_watchdog() on max order boundaries in deferred init
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 117003c32771df617acf66e140fbdbdeb0ac71f5 upstream.
+
+Patch series "initialize deferred pages with interrupts enabled", v4.
+
+Keep interrupts enabled during deferred page initialization in order to
+make the code more modular and allow jiffies to update.
+
+Original approach, and discussion can be found here:
+ http://lkml.kernel.org/r/20200311123848.118638-1-shile.zhang@linux.alibaba.com
+
+This patch (of 3):
+
+deferred_init_memmap() disables interrupts the entire time, so it calls
+touch_nmi_watchdog() periodically to avoid soft lockup splats. Soon it
+will run with interrupts enabled, at which point cond_resched() should be
+used instead.
+
+deferred_grow_zone() makes the same watchdog calls through code shared
+with deferred init but will continue to run with interrupts disabled, so
+it can't call cond_resched().
+
+Pull the watchdog calls up to these two places to allow the first to be
+changed later, independently of the second. The frequency reduces from
+twice per pageblock (init and free) to once per max order block.
+
+Fixes: 3a2d7fa8a3d5 ("mm: disable interrupts while initializing deferred pages")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Shile Zhang <shile.zhang@linux.alibaba.com>
+Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
+Cc: James Morris <jmorris@namei.org>
+Cc: Sasha Levin <sashal@kernel.org>
+Cc: Yiqian Wei <yiwei@redhat.com>
+Cc: <stable@vger.kernel.org> [4.17+]
+Link: http://lkml.kernel.org/r/20200403140952.17177-2-pasha.tatashin@soleen.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1640,7 +1640,6 @@ static void __init deferred_free_pages(u
+ } else if (!(pfn & nr_pgmask)) {
+ deferred_free_range(pfn - nr_free, nr_free);
+ nr_free = 1;
+- touch_nmi_watchdog();
+ } else {
+ nr_free++;
+ }
+@@ -1670,7 +1669,6 @@ static unsigned long __init deferred_in
+ continue;
+ } else if (!page || !(pfn & nr_pgmask)) {
+ page = pfn_to_page(pfn);
+- touch_nmi_watchdog();
+ } else {
+ page++;
+ }
+@@ -1817,8 +1815,10 @@ static int __init deferred_init_memmap(v
+ * that we can avoid introducing any issues with the buddy
+ * allocator.
+ */
+- while (spfn < epfn)
++ while (spfn < epfn) {
+ nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
++ touch_nmi_watchdog();
++ }
+ zone_empty:
+ /* Sanity check that the next zone really is unpopulated */
+ WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
+@@ -1889,6 +1889,7 @@ deferred_grow_zone(struct zone *zone, un
+ first_deferred_pfn = spfn;
+
+ nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
++ touch_nmi_watchdog();
+
+ /* We should only stop along section boundaries */
+ if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
powerpc-mm-fix-conditions-to-perform-mmu-specific-management-by-blocks-on-ppc32.patch
mm-thp-make-the-thp-mapcount-atomic-against-__split_huge_pmd_locked.patch
mm-initialize-deferred-pages-with-interrupts-enabled.patch
+mips-cpu_loongson2ef-need-software-to-maintain-cache-consistency.patch
+mm-pagealloc.c-call-touch_nmi_watchdog-on-max-order-boundaries-in-deferred-init.patch
+mm-call-cond_resched-from-deferred_init_memmap.patch
+ima-fix-ima-digest-hash-table-key-calculation.patch
+ima-switch-to-ima_hash_algo-for-boot-aggregate.patch
+ima-evaluate-error-in-init_ima.patch
+ima-directly-assign-the-ima_default_policy-pointer-to-ima_rules.patch
+ima-call-ima_calc_boot_aggregate-in-ima_eventdigest_init.patch
+ima-remove-__init-annotation-from-ima_pcrread.patch
+evm-fix-possible-memory-leak-in-evm_calc_hmac_or_hash.patch
+ext4-fix-ext_max_extent-index-to-check-for-zeroed-eh_max.patch
+ext4-fix-error-pointer-dereference.patch
+ext4-fix-race-between-ext4_sync_parent-and-rename.patch