git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
s390/boot: Rename physmem_alloc_top_down() to physmem_alloc_or_die()
authorVasily Gorbik <gor@linux.ibm.com>
Thu, 12 Dec 2024 23:59:26 +0000 (00:59 +0100)
committerAlexander Gordeev <agordeev@linux.ibm.com>
Sun, 26 Jan 2025 16:23:58 +0000 (17:23 +0100)
The new name better reflects the function's behavior, emphasizing that
it will terminate execution if allocation fails.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/boot/boot.h
arch/s390/boot/ipl_report.c
arch/s390/boot/kaslr.c
arch/s390/boot/physmem_info.c
arch/s390/boot/startup.c
arch/s390/boot/vmem.c

index 56244fe78182c2c75579f3cc0dbc3de95b29020f..f269026246501bcfe9a2372d5a49f0c45b19303b 100644 (file)
@@ -47,8 +47,8 @@ void physmem_set_usable_limit(unsigned long limit);
 void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
 void physmem_free(enum reserved_range_type type);
 /* for continuous/multiple allocations per type */
-unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
-                                    unsigned long align);
+unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
+                                  unsigned long align);
 /* for single allocations, 1 per type */
 unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
                                  unsigned long align, unsigned long min, unsigned long max,
index d00898852a88acf6da79c845ece77dfb0dd33c54..aea22e268c65d2e3c8f3e75827795e2c4dd68e64 100644 (file)
@@ -155,7 +155,7 @@ void save_ipl_cert_comp_list(void)
                return;
 
        size = get_cert_comp_list_size();
-       early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int));
+       early_ipl_comp_list_addr = physmem_alloc_or_die(RR_CERT_COMP_LIST, size, sizeof(int));
        ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
 
        copy_components_bootdata();
index f864d2bff7754eaeb87e651077632b7afda02b45..d6761e1e2f8d445f47036e2ffddaebada0c8dec5 100644 (file)
@@ -168,7 +168,7 @@ static unsigned long iterate_valid_positions(unsigned long size, unsigned long a
  * cannot have chains.
  *
  * On the other hand, "dynamic" or "repetitive" allocations are done via
- * physmem_alloc_top_down(). These allocations are tightly packed together
+ * physmem_alloc_or_die(). These allocations are tightly packed together
  * top down from the end of online memory. physmem_alloc_pos represents
  * current position where those allocations start.
  *
index 7617aa2d2f7e97406b6da75885d1c68214a158bf..34d310a6d8a523809dcc4846c00aee5b5e036402 100644 (file)
@@ -343,8 +343,8 @@ unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long s
        return addr;
 }
 
-unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size,
-                                    unsigned long align)
+unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
+                                  unsigned long align)
 {
        struct reserved_range *range = &physmem_info.reserved[type];
        struct reserved_range *new_range;
index e00aed22d0d40cb743de46f66c0b707f8fc3cbd0..70f90a19007a0a834520bfadc43d0447625321ea 100644 (file)
@@ -143,7 +143,7 @@ static void rescue_initrd(unsigned long min, unsigned long max)
                return;
        old_addr = addr;
        physmem_free(RR_INITRD);
-       addr = physmem_alloc_top_down(RR_INITRD, size, 0);
+       addr = physmem_alloc_or_die(RR_INITRD, size, 0);
        memmove((void *)addr, (void *)old_addr, size);
 }
 
index d755e5340fcd6b1703b517dfd71bafa598b715d2..3e69e4405d70b5eecba926e7301396c5ec98f11b 100644 (file)
@@ -201,7 +201,7 @@ static void *boot_crst_alloc(unsigned long val)
        unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
        unsigned long *table;
 
-       table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
+       table = (unsigned long *)physmem_alloc_or_die(RR_VMEM, size, size);
        crst_table_init(table, val);
        __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
        return table;
@@ -217,7 +217,7 @@ static pte_t *boot_pte_alloc(void)
         * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
         */
        if (!pte_leftover) {
-               pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+               pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
                __arch_set_page_dat(pte, 1);
        } else {
@@ -247,7 +247,7 @@ static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size
                return __identity_pa(addr);
 #ifdef CONFIG_KASAN
        case POPULATE_KASAN_MAP_SHADOW:
-               addr = physmem_alloc_top_down(RR_VMEM, size, size);
+               addr = physmem_alloc_or_die(RR_VMEM, size, size);
                memset((void *)addr, 0, size);
                return addr;
 #endif