/* arch/metag/include/asm/mmu_context.h */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __METAG_MMU_CONTEXT_H
3 #define __METAG_MMU_CONTEXT_H
5 #include <asm-generic/mm_hooks.h>
9 #include <asm/tlbflush.h>
10 #include <asm/cacheflush.h>
13 #include <linux/mm_types.h>
/*
 * enter_lazy_tlb - hook called when the kernel borrows an mm for a
 * kernel thread.  Meta needs no lazy-TLB bookkeeping, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
20 static inline int init_new_context(struct task_struct
*tsk
,
23 #ifndef CONFIG_METAG_META21_MMU
24 /* We use context to store a pointer to the page holding the
25 * pgd of a process while it is running. While a process is not
26 * running the pgd and context fields should be equal.
28 mm
->context
.pgd_base
= (unsigned long) mm
->pgd
;
30 #ifdef CONFIG_METAG_USER_TCM
31 INIT_LIST_HEAD(&mm
->context
.tcm
);
#ifdef CONFIG_METAG_USER_TCM

#include <linux/tcm.h>
#include <linux/slab.h>

/*
 * destroy_context - tear down an mm's MMU context.
 * @mm: mm being destroyed.
 *
 * Releases every per-process TCM allocation still linked on
 * mm->context.tcm; the _safe iterator allows freeing entries in-loop.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	struct tcm_allocation *pos, *n;

	list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
		tcm_free(pos->tag, pos->addr, pos->size);
		list_del(&pos->list);
		kfree(pos);
	}
}
#else
/* Without user TCM there is nothing to release per-mm. */
#define destroy_context(mm)		do { } while (0)
#endif
#ifdef CONFIG_METAG_META21_MMU
/*
 * load_pgd - point a hardware thread's MMU table registers at a new pgd.
 * @pgd:    page directory to load.
 * @thread: hardware thread id whose MMU PHYS0/PHYS1 registers are written.
 */
static inline void load_pgd(pgd_t *pgd, int thread)
{
	unsigned long phys0 = mmu_phys0_addr(thread);
	unsigned long phys1 = mmu_phys1_addr(thread);

	/*
	 *  0x900 2Gb address space
	 *  The permission bits apply to MMU table region which gives a 2MB
	 *  window into physical memory. We especially don't want userland to be
	 *  able to access this.
	 */
	metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
		    _PAGE_PRESENT, phys0);
	/* Set new MMU base address */
	metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif
74 static inline void switch_mmu(struct mm_struct
*prev
, struct mm_struct
*next
)
76 #ifdef CONFIG_METAG_META21_MMU
77 load_pgd(next
->pgd
, hard_processor_id());
81 /* prev->context == prev->pgd in the case where we are initially
82 switching from the init task to the first process. */
83 if (prev
->context
.pgd_base
!= (unsigned long) prev
->pgd
) {
84 for (i
= FIRST_USER_PGD_NR
; i
< USER_PTRS_PER_PGD
; i
++)
85 ((pgd_t
*) prev
->context
.pgd_base
)[i
] = prev
->pgd
[i
];
87 prev
->pgd
= (pgd_t
*)mmu_get_base();
89 next
->pgd
= prev
->pgd
;
90 prev
->pgd
= (pgd_t
*) prev
->context
.pgd_base
;
92 for (i
= FIRST_USER_PGD_NR
; i
< USER_PTRS_PER_PGD
; i
++)
93 next
->pgd
[i
] = ((pgd_t
*) next
->context
.pgd_base
)[i
];
/*
 * switch_mm - context-switch hook: move from @prev's address space to
 * @next's.  Skips the (expensive) MMU switch when the mm is unchanged.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next)
		switch_mmu(prev, next);
}
/*
 * activate_mm - make @next_mm the active address space (e.g. on exec).
 * Unconditionally performs the MMU switch.
 */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mmu(prev_mm, next_mm);
}

/* No per-mm action is needed on deactivation for this architecture. */
#define deactivate_mm(tsk, mm)		do { } while (0)