--- /dev/null
+From 553b76be868257ba89775b87fa933e43a1bce7b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 11:57:32 -0400
+Subject: ima: extend boot_aggregate with kernel measurements
+
+From: Maurizio Drocco <maurizio.drocco@ibm.com>
+
+[ Upstream commit 20c59ce010f84300f6c655d32db2610d3433f85c ]
+
+Registers 8-9 are used to store measurements of the kernel and its
+command line (e.g., grub2 bootloader with tpm module enabled). IMA
+should include them in the boot aggregate. Registers 8-9 should be
+only included in non-SHA1 digests to avoid ambiguity.
+
+Signed-off-by: Maurizio Drocco <maurizio.drocco@ibm.com>
+Reviewed-by: Bruno Meneguele <bmeneg@redhat.com>
+Tested-by: Bruno Meneguele <bmeneg@redhat.com> (TPM 1.2, TPM 2.0)
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/integrity/ima/ima.h | 2 +-
+ security/integrity/ima/ima_crypto.c | 15 ++++++++++++++-
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 8173982e00ab5..5fae6cfe8d910 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -30,7 +30,7 @@
+
+ enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
+-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
++enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
+
+ /* digest size for IMA, fits SHA1 or MD5 */
+ #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index d86825261b515..b06baf5d3cd32 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -682,7 +682,7 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ if (rc != 0)
+ return rc;
+
+- /* cumulative sha1 over tpm registers 0-7 */
++ /* cumulative digest over TPM registers 0-7 */
+ for (i = TPM_PCR0; i < TPM_PCR8; i++) {
+ ima_pcrread(i, &d);
+ /* now accumulate with current aggregate */
+@@ -691,6 +691,19 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ if (rc != 0)
+ return rc;
+ }
++ /*
++ * Extend cumulative digest over TPM registers 8-9, which contain
++ * measurement for the kernel command line (reg. 8) and image (reg. 9)
++ * in a typical PCR allocation. Registers 8-9 are only included in
++ * non-SHA1 boot_aggregate digests to avoid ambiguity.
++ */
++ if (alg_id != TPM_ALG_SHA1) {
++ for (i = TPM_PCR8; i < TPM_PCR10; i++) {
++ ima_pcrread(i, &d);
++ rc = crypto_shash_update(shash, d.digest,
++ crypto_shash_digestsize(tfm));
++ }
++ }
+ if (!rc)
+ crypto_shash_final(shash, digest);
+ return rc;
+--
+2.27.0
+
--- /dev/null
+From 9606e522f217cd13aaeb262501da1b0245adf94a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Dec 2020 19:44:23 -0500
+Subject: sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+[ Upstream commit 39f23ce07b9355d05a64ae303ce20d1c4b92b957 ]
+
+Although not exactly identical, unthrottle_cfs_rq() and enqueue_task_fair()
+are quite close and follow the same sequence for enqueuing an entity in the
+cfs hierarchy. Modify unthrottle_cfs_rq() to use the same pattern as
+enqueue_task_fair(). This fixes a problem already faced with the latter and
+add an optimization in the last for_each_sched_entity loop.
+
+Fixes: fe61468b2cb (sched/fair: Fix enqueue_task_fair warning)
+Reported-by: Tao Zhou <zohooouoto@zoho.com.cn>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Link: https://lkml.kernel.org/r/20200513135528.4742-1-vincent.guittot@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 36 ++++++++++++++++++++++++++++--------
+ 1 file changed, 28 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 200e121101097..3dd7c10d6a582 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4580,7 +4580,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ struct rq *rq = rq_of(cfs_rq);
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+ struct sched_entity *se;
+- int enqueue = 1;
+ long task_delta, idle_task_delta;
+
+ se = cfs_rq->tg->se[cpu_of(rq)];
+@@ -4604,21 +4603,41 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ idle_task_delta = cfs_rq->idle_h_nr_running;
+ for_each_sched_entity(se) {
+ if (se->on_rq)
+- enqueue = 0;
++ break;
++ cfs_rq = cfs_rq_of(se);
++ enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
++
++ cfs_rq->h_nr_running += task_delta;
++ cfs_rq->idle_h_nr_running += idle_task_delta;
+
++ /* end evaluation on encountering a throttled cfs_rq */
++ if (cfs_rq_throttled(cfs_rq))
++ goto unthrottle_throttle;
++ }
++
++ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+- if (enqueue)
+- enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
++
+ cfs_rq->h_nr_running += task_delta;
+ cfs_rq->idle_h_nr_running += idle_task_delta;
+
++
++ /* end evaluation on encountering a throttled cfs_rq */
+ if (cfs_rq_throttled(cfs_rq))
+- break;
++ goto unthrottle_throttle;
++
++ /*
++ * One parent has been throttled and cfs_rq removed from the
++ * list. Add it back to not break the leaf list.
++ */
++ if (throttled_hierarchy(cfs_rq))
++ list_add_leaf_cfs_rq(cfs_rq);
+ }
+
+- if (!se)
+- add_nr_running(rq, task_delta);
++ /* At this point se is NULL and we are at root level*/
++ add_nr_running(rq, task_delta);
+
++unthrottle_throttle:
+ /*
+ * The cfs_rq_throttled() breaks in the above iteration can result in
+ * incomplete leaf list maintenance, resulting in triggering the
+@@ -4627,7 +4646,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+- list_add_leaf_cfs_rq(cfs_rq);
++ if (list_add_leaf_cfs_rq(cfs_rq))
++ break;
+ }
+
+ assert_list_leaf_cfs_rq(rq);
+--
+2.27.0
+