From 6a2e57ad227ac21cbe0ed941dbedd3b81b22ce7e Mon Sep 17 00:00:00 2001 From: Justinien Bouron Date: Mon, 29 Sep 2025 09:02:20 -0700 Subject: [PATCH] kexec_core: remove superfluous page offset handling in segment loading During kexec_segment loading, when copying the content of the segment (i.e. kexec_segment::kbuf or kexec_segment::buf) to its associated pages, kimage_load_{cma,normal,crash}_segment handle the case where the physical address of the segment is not page aligned, e.g. in kimage_load_normal_segment: page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); // ... ptr = kmap_local_page(page); // ... ptr += maddr & ~PAGE_MASK; mchunk = min_t(size_t, mbytes, PAGE_SIZE - (maddr & ~PAGE_MASK)); // ^^^^ Non page-aligned segments handled here ^^^ // ... if (image->file_mode) memcpy(ptr, kbuf, uchunk); else result = copy_from_user(ptr, buf, uchunk); (similar logic is present in kimage_load_{cma,crash}_segment). This is actually not needed because, prior to their loading, all kexec_segments first go through a vetting step in `sanity_check_segment_list`, which rejects any segment that is not page-aligned: for (i = 0; i < nr_segments; i++) { unsigned long mstart, mend; mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; // ... if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) return -EADDRNOTAVAIL; // ... } In case `sanity_check_segment_list` finds a non-page-aligned segment, the whole kexec load is aborted and no segment is loaded. This means that `kimage_load_{cma,normal,crash}_segment` never actually have to handle non page-aligned segments and `(maddr & ~PAGE_MASK) == 0` is always true no matter if the segment is coming from a file (i.e. `kexec_file_load` syscall), from a user-space buffer (i.e. `kexec_load` syscall) or created by the kernel through `kexec_add_buffer`. In the latter case, `kexec_add_buffer` actually enforces the page alignment: /* Ensure minimum alignment needed for segments. 
*/ kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE); kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE); [jbouron@amazon.com: v3] Link: https://lkml.kernel.org/r/20251024155009.39502-1-jbouron@amazon.com Link: https://lkml.kernel.org/r/20250929160220.47616-1-jbouron@amazon.com Signed-off-by: Justinien Bouron Reviewed-by: Gunnar Kudrjavets Reviewed-by: Andy Shevchenko Acked-by: Baoquan He Cc: Alexander Graf Cc: Marcos Paulo de Souza Cc: Mario Limonciello Cc: Petr Mladek Cc: Yan Zhao Signed-off-by: Andrew Morton --- kernel/kexec_core.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index fa00b239c5d9a..5ed7a2383d5d3 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -742,7 +742,6 @@ static int kimage_load_cma_segment(struct kimage *image, int idx) struct kexec_segment *segment = &image->segment[idx]; struct page *cma = image->segment_cma[idx]; char *ptr = page_address(cma); - unsigned long maddr; size_t ubytes, mbytes; int result = 0; unsigned char __user *buf = NULL; @@ -754,15 +753,12 @@ static int kimage_load_cma_segment(struct kimage *image, int idx) buf = segment->buf; ubytes = segment->bufsz; mbytes = segment->memsz; - maddr = segment->mem; /* Then copy from source buffer to the CMA one */ while (mbytes) { size_t uchunk, mchunk; - ptr += maddr & ~PAGE_MASK; - mchunk = min_t(size_t, mbytes, - PAGE_SIZE - (maddr & ~PAGE_MASK)); + mchunk = min_t(size_t, mbytes, PAGE_SIZE); uchunk = min(ubytes, mchunk); if (uchunk) { @@ -784,7 +780,6 @@ static int kimage_load_cma_segment(struct kimage *image, int idx) } ptr += mchunk; - maddr += mchunk; mbytes -= mchunk; cond_resched(); @@ -839,9 +834,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx) ptr = kmap_local_page(page); /* Start with a clear page */ clear_page(ptr); - ptr += maddr & ~PAGE_MASK; - mchunk = min_t(size_t, mbytes, - PAGE_SIZE - (maddr & ~PAGE_MASK)); + mchunk = min_t(size_t, mbytes, PAGE_SIZE); uchunk = 
min(ubytes, mchunk); if (uchunk) { @@ -904,9 +897,7 @@ static int kimage_load_crash_segment(struct kimage *image, int idx) } arch_kexec_post_alloc_pages(page_address(page), 1, 0); ptr = kmap_local_page(page); - ptr += maddr & ~PAGE_MASK; - mchunk = min_t(size_t, mbytes, - PAGE_SIZE - (maddr & ~PAGE_MASK)); + mchunk = min_t(size_t, mbytes, PAGE_SIZE); uchunk = min(ubytes, mchunk); if (mchunk > uchunk) { /* Zero the trailing part of the page */ -- 2.47.3