]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
047ea784 PM |
2 | #ifndef __ASM_POWERPC_MMU_CONTEXT_H |
3 | #define __ASM_POWERPC_MMU_CONTEXT_H | |
88ced031 | 4 | #ifdef __KERNEL__ |
047ea784 | 5 | |
5e696617 BH |
6 | #include <linux/kernel.h> |
7 | #include <linux/mm.h> | |
8 | #include <linux/sched.h> | |
9 | #include <linux/spinlock.h> | |
80a7cc6c KG |
10 | #include <asm/mmu.h> |
11 | #include <asm/cputable.h> | |
5e696617 | 12 | #include <asm/cputhreads.h> |
80a7cc6c KG |
13 | |
14 | /* | |
5e696617 | 15 | * Most of the context management is out of line |
80a7cc6c | 16 | */ |
1da177e4 LT |
17 | extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
18 | extern void destroy_context(struct mm_struct *mm); | |
15b244a8 AK |
19 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
20 | struct mm_iommu_table_group_mem_t; | |
21 | ||
2e5bbb54 | 22 | extern int isolate_lru_page(struct page *page); /* from internal.h */ |
d7baee69 AK |
23 | extern bool mm_iommu_preregistered(struct mm_struct *mm); |
24 | extern long mm_iommu_get(struct mm_struct *mm, | |
25 | unsigned long ua, unsigned long entries, | |
15b244a8 | 26 | struct mm_iommu_table_group_mem_t **pmem); |
d7baee69 AK |
27 | extern long mm_iommu_put(struct mm_struct *mm, |
28 | struct mm_iommu_table_group_mem_t *mem); | |
88f54a35 AK |
29 | extern void mm_iommu_init(struct mm_struct *mm); |
30 | extern void mm_iommu_cleanup(struct mm_struct *mm); | |
d7baee69 AK |
31 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, |
32 | unsigned long ua, unsigned long size); | |
6b5c19c5 AK |
33 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( |
34 | struct mm_struct *mm, unsigned long ua, unsigned long size); | |
d7baee69 AK |
35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
36 | unsigned long ua, unsigned long entries); | |
15b244a8 AK |
37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
38 | unsigned long ua, unsigned long *hpa); | |
6b5c19c5 AK |
39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
40 | unsigned long ua, unsigned long *hpa); | |
15b244a8 AK |
41 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
42 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); | |
43 | #endif | |
1da177e4 | 44 | extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); |
5e696617 | 45 | extern void set_context(unsigned long id, pgd_t *pgd); |
1da177e4 | 46 | |
6f0ef0f5 | 47 | #ifdef CONFIG_PPC_BOOK3S_64 |
7e381c0f | 48 | extern void radix__switch_mmu_context(struct mm_struct *prev, |
a25bd72b | 49 | struct mm_struct *next); |
d2adba3f AK |
/*
 * Switch the MMU context from @prev to @next for @tsk.  Book3S 64-bit
 * supports two MMU modes: radix takes the radix__ context-switch path,
 * everything else (hash) switches via the SLB.
 */
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		radix__switch_mmu_context(prev, next);
	else
		switch_slb(tsk, next);
}
58 | ||
a336f2f5 | 59 | extern int hash__alloc_context_id(void); |
82228e36 | 60 | extern void hash__reserve_context_id(int id); |
e85a4710 | 61 | extern void __destroy_context(int context_id); |
6f0ef0f5 BH |
62 | static inline void mmu_context_init(void) { } |
63 | #else | |
d2adba3f AK |
64 | extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, |
65 | struct task_struct *tsk); | |
c83ec269 AG |
66 | extern unsigned long __init_new_context(void); |
67 | extern void __destroy_context(unsigned long context_id); | |
6f0ef0f5 BH |
68 | extern void mmu_context_init(void); |
69 | #endif | |
70 | ||
a25bd72b BH |
71 | #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU) |
72 | extern void radix_kvm_prefetch_workaround(struct mm_struct *mm); | |
73 | #else | |
74 | static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { } | |
75 | #endif | |
76 | ||
851d2e2f THFL |
77 | extern void switch_cop(struct mm_struct *next); |
78 | extern int use_cop(unsigned long acop, struct mm_struct *mm); | |
79 | extern void drop_cop(unsigned long acop, struct mm_struct *mm); | |
80 | ||
3a2df379 BH |
81 | extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, |
82 | struct task_struct *tsk); | |
1da177e4 | 83 | |
9765ad13 DG |
84 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
85 | struct task_struct *tsk) | |
86 | { | |
87 | unsigned long flags; | |
88 | ||
89 | local_irq_save(flags); | |
90 | switch_mm_irqs_off(prev, next, tsk); | |
91 | local_irq_restore(flags); | |
92 | } | |
93 | #define switch_mm_irqs_off switch_mm_irqs_off | |
94 | ||
95 | ||
1da177e4 LT |
96 | #define deactivate_mm(tsk,mm) do { } while (0) |
97 | ||
98 | /* | |
99 | * After we have set current->mm to a new value, this activates | |
100 | * the context for the new mm so we see the new mappings. | |
101 | */ | |
102 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) | |
103 | { | |
1da177e4 | 104 | switch_mm(prev, next, current); |
1da177e4 LT |
105 | } |
106 | ||
5e696617 BH |
107 | /* We don't currently use enter_lazy_tlb() for anything */ |
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * 64-bit Book3E tracks the current PGD in the PACA; drop it so
	 * lazy-TLB mode does not reference the old mm's page tables.
	 */
	get_paca()->pgd = NULL;
#endif
}
116 | ||
83d3f0e9 LD |
/* powerpc keeps no extra per-mm state that needs copying at fork. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}
121 | ||
/* Nothing architecture-specific to tear down when an mm exits. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
125 | ||
126 | static inline void arch_unmap(struct mm_struct *mm, | |
127 | struct vm_area_struct *vma, | |
128 | unsigned long start, unsigned long end) | |
129 | { | |
130 | if (start <= mm->context.vdso_base && mm->context.vdso_base < end) | |
131 | mm->context.vdso_base = 0; | |
132 | } | |
133 | ||
/* No architecture hook needed when a new binary's mm is initialised. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
138 | ||
1b2ee126 | 139 | static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, |
d61172b4 | 140 | bool write, bool execute, bool foreign) |
33a709b2 DH |
141 | { |
142 | /* by default, allow everything */ | |
143 | return true; | |
144 | } | |
88ced031 | 145 | #endif /* __KERNEL__ */ |
047ea784 | 146 | #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ |