powerpc/irq: use memblock functions returning virtual address
Author:     Christophe Leroy <christophe.leroy@c-s.fr>
AuthorDate: Thu, 31 Jan 2019 10:08:44 +0000 (10:08 +0000)
Commit:     Michael Ellerman <mpe@ellerman.id.au>
CommitDate: Sat, 23 Feb 2019 11:31:39 +0000 (22:31 +1100)
Since only the virtual address of the allocated blocks is used,
let's use functions that return the virtual address directly.

Those functions have the advantage of also zeroing the block.

Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
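
For context, here is a minimal sketch of the allocation pattern this commit replaces versus the one it introduces (illustrative only, not part of the patch; the softirq_ctx example mirrors the irqstack_early_init() change below):

    /* Old pattern: allocate a physical block, translate it, zero it by hand. */
    softirq_ctx[i] = (struct thread_info *)
            __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
    memset((void *)softirq_ctx[i], 0, THREAD_SIZE);

    /* New pattern: memblock_alloc() returns an already-zeroed block by its
     * virtual address, so the cast, __va() and memset() all go away. */
    softirq_ctx[i] = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
    if (!softirq_ctx[i])
            panic("cannot allocate IRQ stack\n");
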
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bb299613a462cdccf02d20c697fa1c112a31e0a5..4a5dd8800946d844fedceca4406f7201a02f71da 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -725,18 +725,15 @@ void exc_lvl_ctx_init(void)
 #endif
 #endif
 
-               memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;
 
 #ifdef CONFIG_BOOKE
-               memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;
 
-               memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
@@ -754,12 +751,10 @@ void irq_ctx_init(void)
        int i;
 
        for_each_possible_cpu(i) {
-               memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);
 
-               memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 947f904688b0e9d6e8bdb06ef31f571fbb85e415..1f0b7629c1a652694c3ed45dfd3510555a931780 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -196,6 +196,17 @@ static int __init ppc_init(void)
 }
 arch_initcall(ppc_init);
 
+static void *__init alloc_stack(void)
+{
+       void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+
+       if (!ptr)
+               panic("cannot allocate %d bytes for stack at %pS\n",
+                     THREAD_SIZE, (void *)_RET_IP_);
+
+       return ptr;
+}
+
 void __init irqstack_early_init(void)
 {
        unsigned int i;
@@ -203,10 +214,8 @@ void __init irqstack_early_init(void)
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by default */
        for_each_possible_cpu(i) {
-               softirq_ctx[i] = (struct thread_info *)
-                       __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
-               hardirq_ctx[i] = (struct thread_info *)
-                       __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+               softirq_ctx[i] = alloc_stack();
+               hardirq_ctx[i] = alloc_stack();
        }
 }
 
@@ -224,13 +233,10 @@ void __init exc_lvl_early_init(void)
                hw_cpu = 0;
 #endif
 
-               critirq_ctx[hw_cpu] = (struct thread_info *)
-                       __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+               critirq_ctx[hw_cpu] = alloc_stack();
 #ifdef CONFIG_BOOKE
-               dbgirq_ctx[hw_cpu] = (struct thread_info *)
-                       __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
-               mcheckirq_ctx[hw_cpu] = (struct thread_info *)
-                       __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
+               dbgirq_ctx[hw_cpu] = alloc_stack();
+               mcheckirq_ctx[hw_cpu] = alloc_stack();
 #endif
        }
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 236c1151a3a77057013313ed5da588673f5f3419..080dd515d5873d4cf4a0ea9c627ac30cd52b3d52 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -634,19 +634,17 @@ __init u64 ppc64_bolted_size(void)
 
 static void *__init alloc_stack(unsigned long limit, int cpu)
 {
-       unsigned long pa;
+       void *ptr;
 
        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
 
-       pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
-                                       early_cpu_to_node(cpu), MEMBLOCK_NONE);
-       if (!pa) {
-               pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-               if (!pa)
-                       panic("cannot allocate stacks");
-       }
+       ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
+                                    MEMBLOCK_LOW_LIMIT, limit,
+                                    early_cpu_to_node(cpu));
+       if (!ptr)
+               panic("cannot allocate stacks");
 
-       return __va(pa);
+       return ptr;
 }
 
 void __init irqstack_early_init(void)
@@ -739,20 +737,17 @@ void __init emergency_stack_init(void)
                struct thread_info *ti;
 
                ti = alloc_stack(limit, i);
-               memset(ti, 0, THREAD_SIZE);
                emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                ti = alloc_stack(limit, i);
-               memset(ti, 0, THREAD_SIZE);
                emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                /* emergency stack for machine check exception handling. */
                ti = alloc_stack(limit, i);
-               memset(ti, 0, THREAD_SIZE);
                emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif