]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 19 Aug 2022 14:46:03 +0000 (16:46 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 19 Aug 2022 14:46:03 +0000 (16:46 +0200)
added patches:
arm64-kexec_file-use-more-system-keyrings-to-verify-kernel-image-signature.patch
btrfs-only-write-the-sectors-in-the-vertical-stripe-which-has-data-stripes.patch
btrfs-raid56-don-t-trust-any-cached-sector-in-__raid56_parity_recover.patch
kexec-clean-up-arch_kexec_kernel_verify_sig.patch
kexec-keys-make-the-code-in-bzimage64_verify_sig-generic.patch
kexec_file-drop-weak-attribute-from-functions.patch

queue-5.10/arm64-kexec_file-use-more-system-keyrings-to-verify-kernel-image-signature.patch [new file with mode: 0644]
queue-5.10/btrfs-only-write-the-sectors-in-the-vertical-stripe-which-has-data-stripes.patch [new file with mode: 0644]
queue-5.10/btrfs-raid56-don-t-trust-any-cached-sector-in-__raid56_parity_recover.patch [new file with mode: 0644]
queue-5.10/kexec-clean-up-arch_kexec_kernel_verify_sig.patch [new file with mode: 0644]
queue-5.10/kexec-keys-make-the-code-in-bzimage64_verify_sig-generic.patch [new file with mode: 0644]
queue-5.10/kexec_file-drop-weak-attribute-from-functions.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-kexec_file-use-more-system-keyrings-to-verify-kernel-image-signature.patch b/queue-5.10/arm64-kexec_file-use-more-system-keyrings-to-verify-kernel-image-signature.patch
new file mode 100644 (file)
index 0000000..1104243
--- /dev/null
@@ -0,0 +1,68 @@
+From 0d519cadf75184a24313568e7f489a7fc9b1be3b Mon Sep 17 00:00:00 2001
+From: Coiby Xu <coxu@redhat.com>
+Date: Thu, 14 Jul 2022 21:40:26 +0800
+Subject: arm64: kexec_file: use more system keyrings to verify kernel image signature
+
+From: Coiby Xu <coxu@redhat.com>
+
+commit 0d519cadf75184a24313568e7f489a7fc9b1be3b upstream.
+
+Currently, when loading a kernel image via the kexec_file_load() system
+call, arm64 can only use the .builtin_trusted_keys keyring to verify
+a signature whereas x86 can use three more keyrings i.e.
+.secondary_trusted_keys, .machine and .platform keyrings. For example,
+one resulting problem is that kexec'ing a kernel image would be rejected
+with the error "Lockdown: kexec: kexec of unsigned images is restricted;
+see man kernel_lockdown.7".
+
+This patch set enables arm64 to make use of the same keyrings as x86 to
+verify the signature of the kexec'ed kernel image.
+
+Fixes: 732b7b93d849 ("arm64: kexec_file: add kernel signature verification support")
+Cc: stable@vger.kernel.org # 105e10e2cf1c: kexec_file: drop weak attribute from functions
+Cc: stable@vger.kernel.org # 34d5960af253: kexec: clean up arch_kexec_kernel_verify_sig
+Cc: stable@vger.kernel.org # 83b7bb2d49ae: kexec, KEYS: make the code in bzImage64_verify_sig generic
+Acked-by: Baoquan He <bhe@redhat.com>
+Cc: kexec@lists.infradead.org
+Cc: keyrings@vger.kernel.org
+Cc: linux-security-module@vger.kernel.org
+Co-developed-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Coiby Xu <coxu@redhat.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/kexec_image.c |   11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/arch/arm64/kernel/kexec_image.c
++++ b/arch/arm64/kernel/kexec_image.c
+@@ -14,7 +14,6 @@
+ #include <linux/kexec.h>
+ #include <linux/pe.h>
+ #include <linux/string.h>
+-#include <linux/verification.h>
+ #include <asm/byteorder.h>
+ #include <asm/cpufeature.h>
+ #include <asm/image.h>
+@@ -130,18 +129,10 @@ static void *image_load(struct kimage *i
+       return NULL;
+ }
+-#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+-static int image_verify_sig(const char *kernel, unsigned long kernel_len)
+-{
+-      return verify_pefile_signature(kernel, kernel_len, NULL,
+-                                     VERIFYING_KEXEC_PE_SIGNATURE);
+-}
+-#endif
+-
+ const struct kexec_file_ops kexec_image_ops = {
+       .probe = image_probe,
+       .load = image_load,
+ #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+-      .verify_sig = image_verify_sig,
++      .verify_sig = kexec_kernel_verify_pe_sig,
+ #endif
+ };
diff --git a/queue-5.10/btrfs-only-write-the-sectors-in-the-vertical-stripe-which-has-data-stripes.patch b/queue-5.10/btrfs-only-write-the-sectors-in-the-vertical-stripe-which-has-data-stripes.patch
new file mode 100644 (file)
index 0000000..46fd6f3
--- /dev/null
@@ -0,0 +1,166 @@
+From foo@baz Fri Aug 19 04:38:52 PM CEST 2022
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 19 Aug 2022 20:01:09 +0800
+Subject: btrfs: only write the sectors in the vertical stripe which has data stripes
+To: linux-btrfs@vger.kernel.org, stable@vger.kernel.org
+Cc: David Sterba <dsterba@suse.com>
+Message-ID: <5d21e3178f2932c1a4c73899a6f8adce12341ba3.1660906975.git.wqu@suse.com>
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit bd8f7e627703ca5707833d623efcd43f104c7b3f upstream.
+
+If we have only 8K partial write at the beginning of a full RAID56
+stripe, we will write the following contents:
+
+                    0  8K           32K             64K
+Disk 1 (data):     |XX|            |               |
+Disk 2  (data):     |               |               |
+Disk 3  (parity):   |XXXXXXXXXXXXXXX|XXXXXXXXXXXXXXX|
+
+|X| means the sector will be written back to disk.
+
+Note that, although we won't write any sectors from disk 2, we will
+write the full 64KiB of parity to disk.
+
+This behavior is fine for now, but not for the future (especially for
+RAID56J, as we waste quite some space to journal the unused parity
+stripes).
+
+So here we will also utilize the btrfs_raid_bio::dbitmap, anytime we
+queue a higher level bio into an rbio, we will update rbio::dbitmap to
+indicate which vertical stripes we need to writeback.
+
+And at finish_rmw(), we also check dbitmap to see if we need to write
+any sector in the vertical stripe.
+
+So after the patch, above example will only lead to the following
+writeback pattern:
+
+                    0  8K           32K             64K
+Disk 1 (data):     |XX|            |               |
+Disk 2  (data):     |               |               |
+Disk 3  (parity):   |XX|            |               |
+
+Acked-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/raid56.c |   55 ++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 51 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -332,6 +332,9 @@ static void merge_rbio(struct btrfs_raid
+ {
+       bio_list_merge(&dest->bio_list, &victim->bio_list);
+       dest->bio_list_bytes += victim->bio_list_bytes;
++      /* Also inherit the bitmaps from @victim. */
++      bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
++                dest->stripe_npages);
+       dest->generic_bio_cnt += victim->generic_bio_cnt;
+       bio_list_init(&victim->bio_list);
+ }
+@@ -874,6 +877,12 @@ static void rbio_orig_end_io(struct btrf
+       if (rbio->generic_bio_cnt)
+               btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
++      /*
++       * Clear the data bitmap, as the rbio may be cached for later usage.
++       * do this before before unlock_stripe() so there will be no new bio
++       * for this bio.
++       */
++      bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
+       /*
+        * At this moment, rbio->bio_list is empty, however since rbio does not
+@@ -1207,6 +1216,9 @@ static noinline void finish_rmw(struct b
+       else
+               BUG();
++      /* We should have at least one data sector. */
++      ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
++
+       /* at this point we either have a full stripe,
+        * or we've read the full stripe from the drive.
+        * recalculate the parity and write the new results.
+@@ -1280,6 +1292,11 @@ static noinline void finish_rmw(struct b
+       for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+               for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
+                       struct page *page;
++
++                      /* This vertical stripe has no data, skip it. */
++                      if (!test_bit(pagenr, rbio->dbitmap))
++                              continue;
++
+                       if (stripe < rbio->nr_data) {
+                               page = page_in_rbio(rbio, stripe, pagenr, 1);
+                               if (!page)
+@@ -1304,6 +1321,11 @@ static noinline void finish_rmw(struct b
+               for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
+                       struct page *page;
++
++                      /* This vertical stripe has no data, skip it. */
++                      if (!test_bit(pagenr, rbio->dbitmap))
++                              continue;
++
+                       if (stripe < rbio->nr_data) {
+                               page = page_in_rbio(rbio, stripe, pagenr, 1);
+                               if (!page)
+@@ -1729,6 +1751,33 @@ static void btrfs_raid_unplug(struct blk
+       run_plug(plug);
+ }
++/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
++static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
++{
++      const struct btrfs_fs_info *fs_info = rbio->fs_info;
++      const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
++      const u64 full_stripe_start = rbio->bbio->raid_map[0];
++      const u32 orig_len = orig_bio->bi_iter.bi_size;
++      const u32 sectorsize = fs_info->sectorsize;
++      u64 cur_logical;
++
++      ASSERT(orig_logical >= full_stripe_start &&
++             orig_logical + orig_len <= full_stripe_start +
++             rbio->nr_data * rbio->stripe_len);
++
++      bio_list_add(&rbio->bio_list, orig_bio);
++      rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
++
++      /* Update the dbitmap. */
++      for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
++           cur_logical += sectorsize) {
++              int bit = ((u32)(cur_logical - full_stripe_start) >>
++                         PAGE_SHIFT) % rbio->stripe_npages;
++
++              set_bit(bit, rbio->dbitmap);
++      }
++}
++
+ /*
+  * our main entry point for writes from the rest of the FS.
+  */
+@@ -1745,9 +1794,8 @@ int raid56_parity_write(struct btrfs_fs_
+               btrfs_put_bbio(bbio);
+               return PTR_ERR(rbio);
+       }
+-      bio_list_add(&rbio->bio_list, bio);
+-      rbio->bio_list_bytes = bio->bi_iter.bi_size;
+       rbio->operation = BTRFS_RBIO_WRITE;
++      rbio_add_bio(rbio, bio);
+       btrfs_bio_counter_inc_noblocked(fs_info);
+       rbio->generic_bio_cnt = 1;
+@@ -2144,8 +2192,7 @@ int raid56_parity_recover(struct btrfs_f
+       }
+       rbio->operation = BTRFS_RBIO_READ_REBUILD;
+-      bio_list_add(&rbio->bio_list, bio);
+-      rbio->bio_list_bytes = bio->bi_iter.bi_size;
++      rbio_add_bio(rbio, bio);
+       rbio->faila = find_logical_bio_stripe(rbio, bio);
+       if (rbio->faila == -1) {
diff --git a/queue-5.10/btrfs-raid56-don-t-trust-any-cached-sector-in-__raid56_parity_recover.patch b/queue-5.10/btrfs-raid56-don-t-trust-any-cached-sector-in-__raid56_parity_recover.patch
new file mode 100644 (file)
index 0000000..a0d2c21
--- /dev/null
@@ -0,0 +1,207 @@
+From foo@baz Fri Aug 19 04:38:52 PM CEST 2022
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 19 Aug 2022 20:01:10 +0800
+Subject: btrfs: raid56: don't trust any cached sector in __raid56_parity_recover()
+To: linux-btrfs@vger.kernel.org, stable@vger.kernel.org
+Cc: David Sterba <dsterba@suse.com>
+Message-ID: <9127f6cbf1d95f0591ca87c3fcee257b68442f1e.1660906975.git.wqu@suse.com>
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit f6065f8edeb25f4a9dfe0b446030ad995a84a088 upstream.
+
+[BUG]
+There is a small workload which will always fail with recent kernel:
+(A simplified version from btrfs/125 test case)
+
+  mkfs.btrfs -f -m raid5 -d raid5 -b 1G $dev1 $dev2 $dev3
+  mount $dev1 $mnt
+  xfs_io -f -c "pwrite -S 0xee 0 1M" $mnt/file1
+  sync
+  umount $mnt
+  btrfs dev scan -u $dev3
+  mount -o degraded $dev1 $mnt
+  xfs_io -f -c "pwrite -S 0xff 0 128M" $mnt/file2
+  umount $mnt
+  btrfs dev scan
+  mount $dev1 $mnt
+  btrfs balance start --full-balance $mnt
+  umount $mnt
+
+The failure is always failed to read some tree blocks:
+
+  BTRFS info (device dm-4): relocating block group 217710592 flags data|raid5
+  BTRFS error (device dm-4): parent transid verify failed on 38993920 wanted 9 found 7
+  BTRFS error (device dm-4): parent transid verify failed on 38993920 wanted 9 found 7
+  ...
+
+[CAUSE]
+With the recently added debug output, we can see all RAID56 operations
+related to full stripe 38928384:
+
+  56.1183: raid56_read_partial: full_stripe=38928384 devid=2 type=DATA1 offset=0 opf=0x0 physical=9502720 len=65536
+  56.1185: raid56_read_partial: full_stripe=38928384 devid=3 type=DATA2 offset=16384 opf=0x0 physical=9519104 len=16384
+  56.1185: raid56_read_partial: full_stripe=38928384 devid=3 type=DATA2 offset=49152 opf=0x0 physical=9551872 len=16384
+  56.1187: raid56_write_stripe: full_stripe=38928384 devid=3 type=DATA2 offset=0 opf=0x1 physical=9502720 len=16384
+  56.1188: raid56_write_stripe: full_stripe=38928384 devid=3 type=DATA2 offset=32768 opf=0x1 physical=9535488 len=16384
+  56.1188: raid56_write_stripe: full_stripe=38928384 devid=1 type=PQ1 offset=0 opf=0x1 physical=30474240 len=16384
+  56.1189: raid56_write_stripe: full_stripe=38928384 devid=1 type=PQ1 offset=32768 opf=0x1 physical=30507008 len=16384
+  56.1218: raid56_write_stripe: full_stripe=38928384 devid=3 type=DATA2 offset=49152 opf=0x1 physical=9551872 len=16384
+  56.1219: raid56_write_stripe: full_stripe=38928384 devid=1 type=PQ1 offset=49152 opf=0x1 physical=30523392 len=16384
+  56.2721: raid56_parity_recover: full stripe=38928384 eb=39010304 mirror=2
+  56.2723: raid56_parity_recover: full stripe=38928384 eb=39010304 mirror=2
+  56.2724: raid56_parity_recover: full stripe=38928384 eb=39010304 mirror=2
+
+Before we enter raid56_parity_recover(), we have triggered some metadata
+write for the full stripe 38928384, this leads to us to read all the
+sectors from disk.
+
+Furthermore, btrfs raid56 write will cache its calculated P/Q sectors to
+avoid unnecessary read.
+
+This means, for that full stripe, after any partial write, we will have
+stale data, along with P/Q calculated using that stale data.
+
+Thankfully due to patch "btrfs: only write the sectors in the vertical stripe
+which has data stripes" we haven't submitted all the corrupted P/Q to disk.
+
+When we really need to recover certain range, aka in
+raid56_parity_recover(), we will use the cached rbio, along with its
+cached sectors (the full stripe is all cached).
+
+This explains why we have no event raid56_scrub_read_recover()
+triggered.
+
+Since we have the cached P/Q which is calculated using the stale data,
+the recovered one will just be stale.
+
+In our particular test case, it will always return the same incorrect
+metadata, thus causing the same error message "parent transid verify
+failed on 39010304 wanted 9 found 7" again and again.
+
+[BTRFS DESTRUCTIVE RMW PROBLEM]
+
+Test case btrfs/125 (and above workload) always has its trouble with
+the destructive read-modify-write (RMW) cycle:
+
+        0       32K     64K
+Data1:  | Good  | Good  |
+Data2:  | Bad   | Bad   |
+Parity: | Good  | Good  |
+
+In above case, if we trigger any write into Data1, we will use the bad
+data in Data2 to re-generate parity, killing the only chance to recover
+Data2, thus Data2 is lost forever.
+
+This destructive RMW cycle is not specific to btrfs RAID56, but there
+are some btrfs specific behaviors making the case even worse:
+
+- Btrfs will cache sectors for unrelated vertical stripes.
+
+  In above example, if we're only writing into 0~32K range, btrfs will
+  still read data range (32K ~ 64K) of Data1, and (64K~128K) of Data2.
+  This behavior is to cache sectors for later update.
+
+  Incidentally commit d4e28d9b5f04 ("btrfs: raid56: make steal_rbio()
+  subpage compatible") has a bug which makes RAID56 to never trust the
+  cached sectors, thus slightly improve the situation for recovery.
+
+  Unfortunately, follow up fix "btrfs: update stripe_sectors::uptodate in
+  steal_rbio" will revert the behavior back to the old one.
+
+- Btrfs raid56 partial write will update all P/Q sectors and cache them
+
+  This means, even if data at (64K ~ 96K) of Data2 is free space, and
+  only (96K ~ 128K) of Data2 is really stale data.
+  And we write into that (96K ~ 128K), we will update all the parity
+  sectors for the full stripe.
+
+  This unnecessary behavior will completely kill the chance of recovery.
+
+  Thankfully, an unrelated optimization "btrfs: only write the sectors
+  in the vertical stripe which has data stripes" will prevent
+  submitting the write bio for untouched vertical sectors.
+
+  That optimization will keep the on-disk P/Q untouched for a chance for
+  later recovery.
+
+[FIX]
+Although we have no good way to completely fix the destructive RMW
+(unless we go full scrub for each partial write), we can still limit the
+damage.
+
+With patch "btrfs: only write the sectors in the vertical stripe which
+has data stripes" now we won't really submit the P/Q of unrelated
+vertical stripes, so the on-disk P/Q should still be fine.
+
+Now what we really need to do is just drop all the cached sectors when
+doing recovery.
+
+By this, we have a chance to read the original P/Q from disk, and have a
+chance to recover the stale data, while still keep the cache to speed up
+regular write path.
+
+In fact, just dropping all the cache for recovery path is good enough to
+allow the test case btrfs/125 along with the small script to pass
+reliably.
+
+The lack of metadata write after the degraded mount, and forced metadata
+COW is saving us this time.
+
+So this patch will fix the behavior by not trusting any cache in
+__raid56_parity_recover(), to solve the problem while still keep the
+cache useful.
+
+But please note that this test pass DOES NOT mean we have solved the
+destructive RMW problem, we just do damage control a little better.
+
+Related patches:
+
+- btrfs: only write the sectors in the vertical stripe
+- d4e28d9b5f04 ("btrfs: raid56: make steal_rbio() subpage compatible")
+- btrfs: update stripe_sectors::uptodate in steal_rbio
+
+Acked-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/raid56.c |   19 ++++++-------------
+ 1 file changed, 6 insertions(+), 13 deletions(-)
+
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2094,9 +2094,12 @@ static int __raid56_parity_recover(struc
+       atomic_set(&rbio->error, 0);
+       /*
+-       * read everything that hasn't failed.  Thanks to the
+-       * stripe cache, it is possible that some or all of these
+-       * pages are going to be uptodate.
++       * Read everything that hasn't failed. However this time we will
++       * not trust any cached sector.
++       * As we may read out some stale data but higher layer is not reading
++       * that stale part.
++       *
++       * So here we always re-read everything in recovery path.
+        */
+       for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
+               if (rbio->faila == stripe || rbio->failb == stripe) {
+@@ -2105,16 +2108,6 @@ static int __raid56_parity_recover(struc
+               }
+               for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
+-                      struct page *p;
+-
+-                      /*
+-                       * the rmw code may have already read this
+-                       * page in
+-                       */
+-                      p = rbio_stripe_page(rbio, stripe, pagenr);
+-                      if (PageUptodate(p))
+-                              continue;
+-
+                       ret = rbio_add_io_page(rbio, &bio_list,
+                                      rbio_stripe_page(rbio, stripe, pagenr),
+                                      stripe, pagenr, rbio->stripe_len);
diff --git a/queue-5.10/kexec-clean-up-arch_kexec_kernel_verify_sig.patch b/queue-5.10/kexec-clean-up-arch_kexec_kernel_verify_sig.patch
new file mode 100644 (file)
index 0000000..858c9a1
--- /dev/null
@@ -0,0 +1,100 @@
+From 689a71493bd2f31c024f8c0395f85a1fd4b2138e Mon Sep 17 00:00:00 2001
+From: Coiby Xu <coxu@redhat.com>
+Date: Thu, 14 Jul 2022 21:40:24 +0800
+Subject: kexec: clean up arch_kexec_kernel_verify_sig
+
+From: Coiby Xu <coxu@redhat.com>
+
+commit 689a71493bd2f31c024f8c0395f85a1fd4b2138e upstream.
+
+Before commit 105e10e2cf1c ("kexec_file: drop weak attribute from
+functions"), there was already no arch-specific implementation
+of arch_kexec_kernel_verify_sig. With weak attribute dropped by that
+commit, arch_kexec_kernel_verify_sig is completely useless. So clean it
+up.
+
+Note later patches are dependent on this patch so it should be backported
+to the stable tree as well.
+
+Cc: stable@vger.kernel.org
+Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
+Reviewed-by: Michal Suchanek <msuchanek@suse.de>
+Acked-by: Baoquan He <bhe@redhat.com>
+Signed-off-by: Coiby Xu <coxu@redhat.com>
+[zohar@linux.ibm.com: reworded patch description "Note"]
+Link: https://lore.kernel.org/linux-integrity/20220714134027.394370-1-coxu@redhat.com/
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kexec.h |    5 -----
+ kernel/kexec_file.c   |   33 +++++++++++++--------------------
+ 2 files changed, 13 insertions(+), 25 deletions(-)
+
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -206,11 +206,6 @@ static inline void *arch_kexec_kernel_im
+ }
+ #endif
+-#ifdef CONFIG_KEXEC_SIG
+-int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+-                               unsigned long buf_len);
+-#endif
+-
+ extern int kexec_add_buffer(struct kexec_buf *kbuf);
+ int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -81,24 +81,6 @@ int kexec_image_post_load_cleanup_defaul
+       return image->fops->cleanup(image->image_loader_data);
+ }
+-#ifdef CONFIG_KEXEC_SIG
+-static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
+-                                        unsigned long buf_len)
+-{
+-      if (!image->fops || !image->fops->verify_sig) {
+-              pr_debug("kernel loader does not support signature verification.\n");
+-              return -EKEYREJECTED;
+-      }
+-
+-      return image->fops->verify_sig(buf, buf_len);
+-}
+-
+-int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len)
+-{
+-      return kexec_image_verify_sig_default(image, buf, buf_len);
+-}
+-#endif
+-
+ /*
+  * Free up memory used by kernel, initrd, and command line. This is temporary
+  * memory allocation which is not needed any more after these buffers have
+@@ -141,13 +123,24 @@ void kimage_file_post_load_cleanup(struc
+ }
+ #ifdef CONFIG_KEXEC_SIG
++static int kexec_image_verify_sig(struct kimage *image, void *buf,
++                                unsigned long buf_len)
++{
++      if (!image->fops || !image->fops->verify_sig) {
++              pr_debug("kernel loader does not support signature verification.\n");
++              return -EKEYREJECTED;
++      }
++
++      return image->fops->verify_sig(buf, buf_len);
++}
++
+ static int
+ kimage_validate_signature(struct kimage *image)
+ {
+       int ret;
+-      ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
+-                                         image->kernel_buf_len);
++      ret = kexec_image_verify_sig(image, image->kernel_buf,
++                                   image->kernel_buf_len);
+       if (ret) {
+               if (sig_enforce) {
diff --git a/queue-5.10/kexec-keys-make-the-code-in-bzimage64_verify_sig-generic.patch b/queue-5.10/kexec-keys-make-the-code-in-bzimage64_verify_sig-generic.patch
new file mode 100644 (file)
index 0000000..0ba76fd
--- /dev/null
@@ -0,0 +1,120 @@
+From c903dae8941deb55043ee46ded29e84e97cd84bb Mon Sep 17 00:00:00 2001
+From: Coiby Xu <coxu@redhat.com>
+Date: Thu, 14 Jul 2022 21:40:25 +0800
+Subject: kexec, KEYS: make the code in bzImage64_verify_sig generic
+
+From: Coiby Xu <coxu@redhat.com>
+
+commit c903dae8941deb55043ee46ded29e84e97cd84bb upstream.
+
+commit 278311e417be ("kexec, KEYS: Make use of platform keyring for
+signature verify") adds platform keyring support on x86 kexec but not
+arm64.
+
+The code in bzImage64_verify_sig uses the keys on the
+.builtin_trusted_keys, .machine, if configured and enabled,
+.secondary_trusted_keys, also if configured, and .platform keyrings
+to verify the signed kernel image as PE file.
+
+Cc: kexec@lists.infradead.org
+Cc: keyrings@vger.kernel.org
+Cc: linux-security-module@vger.kernel.org
+Reviewed-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Coiby Xu <coxu@redhat.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kexec-bzimage64.c |   20 +-------------------
+ include/linux/kexec.h             |    7 +++++++
+ kernel/kexec_file.c               |   17 +++++++++++++++++
+ 3 files changed, 25 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -17,7 +17,6 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/efi.h>
+-#include <linux/verification.h>
+ #include <asm/bootparam.h>
+ #include <asm/setup.h>
+@@ -528,28 +527,11 @@ static int bzImage64_cleanup(void *loade
+       return 0;
+ }
+-#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
+-static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+-{
+-      int ret;
+-
+-      ret = verify_pefile_signature(kernel, kernel_len,
+-                                    VERIFY_USE_SECONDARY_KEYRING,
+-                                    VERIFYING_KEXEC_PE_SIGNATURE);
+-      if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
+-              ret = verify_pefile_signature(kernel, kernel_len,
+-                                            VERIFY_USE_PLATFORM_KEYRING,
+-                                            VERIFYING_KEXEC_PE_SIGNATURE);
+-      }
+-      return ret;
+-}
+-#endif
+-
+ const struct kexec_file_ops kexec_bzImage64_ops = {
+       .probe = bzImage64_probe,
+       .load = bzImage64_load,
+       .cleanup = bzImage64_cleanup,
+ #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
+-      .verify_sig = bzImage64_verify_sig,
++      .verify_sig = kexec_kernel_verify_pe_sig,
+ #endif
+ };
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -19,6 +19,7 @@
+ #include <asm/io.h>
+ #include <uapi/linux/kexec.h>
++#include <linux/verification.h>
+ #ifdef CONFIG_KEXEC_CORE
+ #include <linux/list.h>
+@@ -206,6 +207,12 @@ static inline void *arch_kexec_kernel_im
+ }
+ #endif
++#ifdef CONFIG_KEXEC_SIG
++#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
++int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
++#endif
++#endif
++
+ extern int kexec_add_buffer(struct kexec_buf *kbuf);
+ int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -123,6 +123,23 @@ void kimage_file_post_load_cleanup(struc
+ }
+ #ifdef CONFIG_KEXEC_SIG
++#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
++int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len)
++{
++      int ret;
++
++      ret = verify_pefile_signature(kernel, kernel_len,
++                                    VERIFY_USE_SECONDARY_KEYRING,
++                                    VERIFYING_KEXEC_PE_SIGNATURE);
++      if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
++              ret = verify_pefile_signature(kernel, kernel_len,
++                                            VERIFY_USE_PLATFORM_KEYRING,
++                                            VERIFYING_KEXEC_PE_SIGNATURE);
++      }
++      return ret;
++}
++#endif
++
+ static int kexec_image_verify_sig(struct kimage *image, void *buf,
+                                 unsigned long buf_len)
+ {
diff --git a/queue-5.10/kexec_file-drop-weak-attribute-from-functions.patch b/queue-5.10/kexec_file-drop-weak-attribute-from-functions.patch
new file mode 100644 (file)
index 0000000..ca3694a
--- /dev/null
@@ -0,0 +1,246 @@
+From 65d9a9a60fd71be964effb2e94747a6acb6e7015 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Fri, 1 Jul 2022 13:04:04 +0530
+Subject: kexec_file: drop weak attribute from functions
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 65d9a9a60fd71be964effb2e94747a6acb6e7015 upstream.
+
+As requested
+(http://lkml.kernel.org/r/87ee0q7b92.fsf@email.froward.int.ebiederm.org),
+this series converts weak functions in kexec to use the #ifdef approach.
+
+Quoting the 3e35142ef99fe ("kexec_file: drop weak attribute from
+arch_kexec_apply_relocations[_add]") changelog:
+
+: Since commit d1bcae833b32f1 ("ELF: Don't generate unused section symbols")
+: [1], binutils (v2.36+) started dropping section symbols that it thought
+: were unused.  This isn't an issue in general, but with kexec_file.c, gcc
+: is placing kexec_arch_apply_relocations[_add] into a separate
+: .text.unlikely section and the section symbol ".text.unlikely" is being
+: dropped.  Due to this, recordmcount is unable to find a non-weak symbol in
+: .text.unlikely to generate a relocation record against.
+
+This patch (of 2);
+
+Drop __weak attribute from functions in kexec_file.c:
+- arch_kexec_kernel_image_probe()
+- arch_kimage_file_post_load_cleanup()
+- arch_kexec_kernel_image_load()
+- arch_kexec_locate_mem_hole()
+- arch_kexec_kernel_verify_sig()
+
+arch_kexec_kernel_image_load() calls into kexec_image_load_default(), so
+drop the static attribute for the latter.
+
+arch_kexec_kernel_verify_sig() is not overridden by any architecture, so
+drop the __weak attribute.
+
+Link: https://lkml.kernel.org/r/cover.1656659357.git.naveen.n.rao@linux.vnet.ibm.com
+Link: https://lkml.kernel.org/r/2cd7ca1fe4d6bb6ca38e3283c717878388ed6788.1656659357.git.naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Suggested-by: Eric Biederman <ebiederm@xmission.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kexec.h   |    4 ++-
+ arch/powerpc/include/asm/kexec.h |    9 +++++++
+ arch/s390/include/asm/kexec.h    |    3 ++
+ arch/x86/include/asm/kexec.h     |    6 +++++
+ include/linux/kexec.h            |   44 +++++++++++++++++++++++++++++++++------
+ kernel/kexec_file.c              |   35 +------------------------------
+ 6 files changed, 61 insertions(+), 40 deletions(-)
+
+--- a/arch/arm64/include/asm/kexec.h
++++ b/arch/arm64/include/asm/kexec.h
+@@ -106,7 +106,9 @@ extern const struct kexec_file_ops kexec
+ struct kimage;
+-extern int arch_kimage_file_post_load_cleanup(struct kimage *image);
++int arch_kimage_file_post_load_cleanup(struct kimage *image);
++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
++
+ extern int load_other_segments(struct kimage *image,
+               unsigned long kernel_load_addr, unsigned long kernel_size,
+               char *initrd, unsigned long initrd_len,
+--- a/arch/powerpc/include/asm/kexec.h
++++ b/arch/powerpc/include/asm/kexec.h
+@@ -131,6 +131,15 @@ int delete_fdt_mem_rsv(void *fdt, unsign
+ #ifdef CONFIG_PPC64
+ struct kexec_buf;
++int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len);
++#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe
++
++int arch_kimage_file_post_load_cleanup(struct kimage *image);
++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
++
++int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
++#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
++
+ int load_crashdump_segments_ppc64(struct kimage *image,
+                                 struct kexec_buf *kbuf);
+ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+--- a/arch/s390/include/asm/kexec.h
++++ b/arch/s390/include/asm/kexec.h
+@@ -92,5 +92,8 @@ int arch_kexec_apply_relocations_add(str
+                                    const Elf_Shdr *relsec,
+                                    const Elf_Shdr *symtab);
+ #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
++
++int arch_kimage_file_post_load_cleanup(struct kimage *image);
++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+ #endif
+ #endif /*_S390_KEXEC_H */
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -198,6 +198,12 @@ int arch_kexec_apply_relocations_add(str
+                                    const Elf_Shdr *relsec,
+                                    const Elf_Shdr *symtab);
+ #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
++
++void *arch_kexec_kernel_image_load(struct kimage *image);
++#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load
++
++int arch_kimage_file_post_load_cleanup(struct kimage *image);
++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+ #endif
+ #endif
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -182,21 +182,53 @@ int kexec_purgatory_get_set_symbol(struc
+                                  void *buf, unsigned int size,
+                                  bool get_value);
+ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
++void *kexec_image_load_default(struct kimage *image);
++
++#ifndef arch_kexec_kernel_image_probe
++static inline int
++arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
++{
++      return kexec_image_probe_default(image, buf, buf_len);
++}
++#endif
++
++#ifndef arch_kimage_file_post_load_cleanup
++static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
++{
++      return kexec_image_post_load_cleanup_default(image);
++}
++#endif
++
++#ifndef arch_kexec_kernel_image_load
++static inline void *arch_kexec_kernel_image_load(struct kimage *image)
++{
++      return kexec_image_load_default(image);
++}
++#endif
+-/* Architectures may override the below functions */
+-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+-                                unsigned long buf_len);
+-void *arch_kexec_kernel_image_load(struct kimage *image);
+-int arch_kimage_file_post_load_cleanup(struct kimage *image);
+ #ifdef CONFIG_KEXEC_SIG
+ int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+                                unsigned long buf_len);
+ #endif
+-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
+ extern int kexec_add_buffer(struct kexec_buf *kbuf);
+ int kexec_locate_mem_hole(struct kexec_buf *kbuf);
++#ifndef arch_kexec_locate_mem_hole
++/**
++ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
++ * @kbuf:                       Parameters for the memory search.
++ *
++ * On success, kbuf->mem will have the start address of the memory region found.
++ *
++ * Return: 0 on success, negative errno on error.
++ */
++static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
++{
++      return kexec_locate_mem_hole(kbuf);
++}
++#endif
++
+ /* Alignment required for elf header segment */
+ #define ELF_CORE_HEADER_ALIGN   4096
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -62,14 +62,7 @@ int kexec_image_probe_default(struct kim
+       return ret;
+ }
+-/* Architectures can provide this probe function */
+-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+-                                       unsigned long buf_len)
+-{
+-      return kexec_image_probe_default(image, buf, buf_len);
+-}
+-
+-static void *kexec_image_load_default(struct kimage *image)
++void *kexec_image_load_default(struct kimage *image)
+ {
+       if (!image->fops || !image->fops->load)
+               return ERR_PTR(-ENOEXEC);
+@@ -80,11 +73,6 @@ static void *kexec_image_load_default(st
+                                image->cmdline_buf_len);
+ }
+-void * __weak arch_kexec_kernel_image_load(struct kimage *image)
+-{
+-      return kexec_image_load_default(image);
+-}
+-
+ int kexec_image_post_load_cleanup_default(struct kimage *image)
+ {
+       if (!image->fops || !image->fops->cleanup)
+@@ -93,11 +81,6 @@ int kexec_image_post_load_cleanup_defaul
+       return image->fops->cleanup(image->image_loader_data);
+ }
+-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
+-{
+-      return kexec_image_post_load_cleanup_default(image);
+-}
+-
+ #ifdef CONFIG_KEXEC_SIG
+ static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
+                                         unsigned long buf_len)
+@@ -110,8 +93,7 @@ static int kexec_image_verify_sig_defaul
+       return image->fops->verify_sig(buf, buf_len);
+ }
+-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+-                                      unsigned long buf_len)
++int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len)
+ {
+       return kexec_image_verify_sig_default(image, buf, buf_len);
+ }
+@@ -617,19 +599,6 @@ int kexec_locate_mem_hole(struct kexec_b
+ }
+ /**
+- * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+- * @kbuf:                       Parameters for the memory search.
+- *
+- * On success, kbuf->mem will have the start address of the memory region found.
+- *
+- * Return: 0 on success, negative errno on error.
+- */
+-int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+-{
+-      return kexec_locate_mem_hole(kbuf);
+-}
+-
+-/**
+  * kexec_add_buffer - place a buffer in a kexec segment
+  * @kbuf:     Buffer contents and memory parameters.
+  *
index dc1468948a5223ef77b299f57a8b51b8ad632d19..7c21b17f35c2dc46fe5bccfdae6fa5388da5c6fe 100644 (file)
@@ -541,3 +541,9 @@ tee-add-overflow-check-in-register_shm_helper.patch
 net-9p-initialize-the-iounit-field-during-fid-creation.patch
 net_sched-cls_route-disallow-handle-of-0.patch
 sched-fair-fix-fault-in-reweight_entity.patch
+btrfs-only-write-the-sectors-in-the-vertical-stripe-which-has-data-stripes.patch
+btrfs-raid56-don-t-trust-any-cached-sector-in-__raid56_parity_recover.patch
+kexec_file-drop-weak-attribute-from-functions.patch
+kexec-clean-up-arch_kexec_kernel_verify_sig.patch
+kexec-keys-make-the-code-in-bzimage64_verify_sig-generic.patch
+arm64-kexec_file-use-more-system-keyrings-to-verify-kernel-image-signature.patch