#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>
+#include <linux/page_table_check.h>
struct mm_struct;
struct vm_area_struct;
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
+extern void migrate_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr);
+
extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_val(*ptep) = 0;
+ WRITE_ONCE(pte_val(*ptep), 0);
}
extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern pgd_t swapper_pg_dir[1024];
+#ifdef CONFIG_COMPACTION
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+
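+/*
+ * Return the old PTE value and clear the entry.  Alpha manages the
+ * accessed/dirty state in software (via FOR/FOW/FOE faults), so a plain
+ * read followed by pte_clear() is enough here; READ_ONCE/WRITE_ONCE only
+ * guard against torn or repeated accesses by the compiler.
+ */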
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = READ_ONCE(*ptep);
+
+ pte_clear(mm, address, ptep);
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+
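+/*
+ * Clear the PTE and make sure no CPU is still holding a translation for
+ * it before returning, so migration/compaction can start moving the page
+ * immediately.
+ */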
+static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+
+ page_table_check_pte_clear(mm, pte);
+ migrate_flush_tlb_page(vma, addr);
+ return pte;
+}
+
+#endif
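+
+/*
+ * With the two __HAVE_ARCH_ overrides above, page migration no longer
+ * uses the generic ptep_clear_flush(), which flushes via flush_tlb_page().
+ * Roughly (illustrative sketch only):
+ *
+ *   old = ptep_clear_flush(vma, addr, ptep);
+ *     -> ptep_get_and_clear(mm, addr, ptep)
+ *     -> migrate_flush_tlb_page(vma, addr)   (all-CPU shootdown)
+ *
+ * so no CPU keeps using a stale translation while the page contents are
+ * copied to their new location.
+ */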
/*
* The Alpha doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alpha TLB shootdown helpers
+ *
+ * Copyright (C) 2025 Magnus Lindholm <linmag7@gmail.com>
+ *
+ * Alpha-specific TLB flush helpers that cannot be expressed purely
+ * as inline functions.
+ *
+ * These helpers provide combined MM context handling (ASN rollover)
+ * and immediate TLB invalidation for page migration and memory
+ * compaction paths, where lazy shootdowns are insufficient.
+ */
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+#include <asm/pal.h>
+#include <asm/mmu_context.h>
+
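+/*
+ * asn_lock is set by switch_mm while this CPU is choosing/installing an
+ * ASN; when it is set, the IPI handler below must not reload the mm
+ * context underneath it and instead falls back to invalidating the saved
+ * context (the same convention as the IPI handlers in
+ * arch/alpha/kernel/smp.c).
+ */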
+#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
+
+/*
+ * Migration/compaction helper: combine mm context (ASN) handling with an
+ * immediate per-page TLB invalidate and (for exec) an instruction barrier.
+ *
+ * This mirrors the SMP combined IPI handler semantics, but runs locally on UP.
+ */
+#ifndef CONFIG_SMP
+void migrate_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2;
+
+ /*
+ * First handle the mm-context side:
+ * if this CPU is currently running the mm, load a new context with a
+ * fresh ASN; otherwise invalidate the saved context so a new ASN is
+ * allocated at the next context switch.
+ *
+ * On UP this mainly matches the SMP semantics and keeps the exec and
+ * i-cache tagging assumptions intact when compaction migrates pages.
+ */
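+ /*
+ * (Both helpers live in asm/tlbflush.h: flush_tlb_current() is
+ * essentially __load_new_mm_context(), and flush_tlb_other() just
+ * zeroes the mm's saved context word.)
+ */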
+ if (mm == current->active_mm)
+ flush_tlb_current(mm);
+ else
+ flush_tlb_other(mm);
+
+ /*
+ * Then drop the translation for this VA immediately.  For exec
+ * mappings, tbi type 3 invalidates the ITB entry as well as the
+ * DTB entry.
+ */
+ tbi(tbi_type, addr);
+}
+
+#else
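+
+/* Argument block handed to each CPU by the combined shootdown IPI. */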
+struct tlb_mm_and_addr {
+ struct mm_struct *mm;
+ unsigned long addr;
+ int tbi_type; /* 2 = DTB, 3 = ITB+DTB */
+};
+
+static void ipi_flush_mm_and_page(void *x)
+{
+ struct tlb_mm_and_addr *d = x;
+
+ /* Part 1: mm-context side: give this CPU a fresh ASN for the mm, or invalidate its saved context. */
+ if (d->mm == current->active_mm && !asn_locked())
+ __load_new_mm_context(d->mm);
+ else
+ flush_tlb_other(d->mm);
+
+ /* Part 2: immediate per-VA invalidation on this CPU. */
+ tbi(d->tbi_type, d->addr);
+}
+
+void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct tlb_mm_and_addr d = {
+ .mm = mm,
+ .addr = addr,
+ .tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2,
+ };
+
+ /*
+ * One synchronous rendezvous: every CPU runs ipi_flush_mm_and_page(),
+ * combining flush_tlb_mm()-style context handling with an immediate
+ * per-page invalidate in a single round of IPIs.
+ */
+ preempt_disable();
+ on_each_cpu(ipi_flush_mm_and_page, &d, 1);
+
+ /*
+ * As in flush_tlb_mm()'s single-user path, also clear the context
+ * saved for every other CPU so each of them allocates a fresh ASN
+ * the next time it switches to this mm.
+ */
+ if (atomic_read(&mm->mm_users) <= 1) {
+ int cpu, this_cpu = smp_processor_id();
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (!cpu_online(cpu) || cpu == this_cpu)
+ continue;
+ if (READ_ONCE(mm->context[cpu]))
+ WRITE_ONCE(mm->context[cpu], 0);
+ }
+ }
+ preempt_enable();
+}
+
+#endif