]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - arch/powerpc/include/asm/mmu.h
powerpc: Add option to use jump label for mmu_has_feature()
[thirdparty/kernel/stable.git] / arch / powerpc / include / asm / mmu.h
CommitLineData
047ea784
PM
1#ifndef _ASM_POWERPC_MMU_H_
2#define _ASM_POWERPC_MMU_H_
88ced031 3#ifdef __KERNEL__
047ea784 4
cd3db0c4
BH
5#include <linux/types.h>
6
7c03d653
BH
7#include <asm/asm-compat.h>
8#include <asm/feature-fixups.h>
9
10/*
11 * MMU features bit definitions
12 */
13
14/*
5a25b6f5 15 * MMU families
7c03d653
BH
16 */
17#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
18#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
19#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
20#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
21#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
cd68098b 22#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
7c03d653 23
5a25b6f5
AK
24/* Radix page table supported and enabled */
25#define MMU_FTR_TYPE_RADIX ASM_CONST(0x00000040)
26
7c03d653 27/*
5a25b6f5 28 * Individual features below.
7c03d653 29 */
5a25b6f5 30
accfad7d
AK
31/*
 * We need to clear the top 16 bits of the VA (from the remaining 64 bits)
 * in tlbie* instructions
34 */
35#define MMU_FTR_TLBIE_CROP_VA ASM_CONST(0x00008000)
7c03d653
BH
36
37/* Enable use of high BAT registers */
38#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
39
40/* Enable >32-bit physical addresses on 32-bit processor, only used
41 * by CONFIG_6xx currently as BookE supports that from day 1
42 */
43#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
44
f048aace
BH
45/* Enable use of broadcast TLB invalidations. We don't always set it
46 * on processors that support it due to other constraints with the
47 * use of such invalidations
48 */
49#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
50
c3071951 51/* Enable use of tlbilx invalidate instructions.
f048aace 52 */
c3071951 53#define MMU_FTR_USE_TLBILX ASM_CONST(0x00080000)
f048aace
BH
54
55/* This indicates that the processor cannot handle multiple outstanding
56 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
57 * around such invalidate forms.
58 */
59#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
60
2319f123
KG
61/* This indicates that the processor doesn't handle way selection
62 * properly and needs SW to track and update the LRU state. This
63 * is specific to an errata on e300c2/c3/c4 class parts
64 */
65#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
66
df5d6ecf
KG
67/* Enable use of TLB reservation. Processor should support tlbsrx.
68 * instruction and MAS0[WQ].
69 */
70#define MMU_FTR_USE_TLBRSRV ASM_CONST(0x00800000)
71
72/* Use paired MAS registers (MAS7||MAS3, etc.)
73 */
74#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
75
13b3d13b 76/* Doesn't support the B bit (1T segment) in SLBIE
44ae3ab3 77 */
13b3d13b 78#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x02000000)
44ae3ab3
ME
79
80/* Support 16M large pages
81 */
82#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000)
83
84/* Supports TLBIEL variant
85 */
86#define MMU_FTR_TLBIEL ASM_CONST(0x08000000)
87
88/* Supports tlbies w/o locking
89 */
90#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000)
91
92/* Large pages can be marked CI
93 */
94#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000)
95
96/* 1T segments available
97 */
98#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
99
44ae3ab3
ME
100/* MMU feature bit sets for various CPUs */
101#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
102 MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
103#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
accfad7d 104#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
44ae3ab3
ME
105#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
106#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
a32e252f 107#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
71e18497 108#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
c3ab300e 109#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
44ae3ab3
ME
110#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
111 MMU_FTR_CI_LARGE_PAGE
112#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
113 MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
7c03d653 114#ifndef __ASSEMBLY__
4db73271 115#include <linux/bug.h>
7c03d653
BH
116#include <asm/cputable.h>
117
3160b097
BB
118#ifdef CONFIG_PPC_FSL_BOOK3E
119#include <asm/percpu.h>
120DECLARE_PER_CPU(int, next_tlbcam_idx);
121#endif
122
773edead
ME
123enum {
124 MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
125 MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
126 MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
127 MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
128 MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
129 MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
130 MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
131 MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
accfad7d 132 MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
a8ed87c9 133#ifdef CONFIG_PPC_RADIX_MMU
5a25b6f5 134 MMU_FTR_TYPE_RADIX |
a8ed87c9
AK
135#endif
136 0,
773edead
ME
137};
138
a141cca3 139static inline bool early_mmu_has_feature(unsigned long feature)
7c03d653 140{
a81dc9d9 141 return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
7c03d653
BH
142}
143
c12e6f24
KH
144#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
145#include <linux/jump_label.h>
146
147#define NUM_MMU_FTR_KEYS 32
148
149extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
150
151extern void mmu_feature_keys_init(void);
152
153static __always_inline bool mmu_has_feature(unsigned long feature)
154{
155 int i;
156
157 BUILD_BUG_ON(!__builtin_constant_p(feature));
158
159 if (!(MMU_FTRS_POSSIBLE & feature))
160 return false;
161
162 i = __builtin_ctzl(feature);
163 return static_branch_likely(&mmu_feature_keys[i]);
164}
165
166static inline void mmu_clear_feature(unsigned long feature)
167{
168 int i;
169
170 i = __builtin_ctzl(feature);
171 cur_cpu_spec->mmu_features &= ~feature;
172 static_branch_disable(&mmu_feature_keys[i]);
173}
174#else
175
/* No static keys to initialise when jump-label feature checks are off. */
static inline void mmu_feature_keys_init(void) { }
180
a141cca3
ME
/* Without jump labels, runtime tests fall back to the cpu spec mask. */
static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}
185
91b191c7
DK
186static inline void mmu_clear_feature(unsigned long feature)
187{
188 cur_cpu_spec->mmu_features &= ~feature;
189}
#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
91b191c7 191
7c03d653
BH
192extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
193
cd3db0c4
BH
194#ifdef CONFIG_PPC64
195/* This is our real memory area size on ppc64 server, on embedded, we
 * make it match the size of our bolted TLB area
197 */
198extern u64 ppc64_rma_size;
199#endif /* CONFIG_PPC64 */
200
78f1dbde
AK
201struct mm_struct;
202#ifdef CONFIG_DEBUG_VM
203extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
204#else /* CONFIG_DEBUG_VM */
205static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
206{
207}
208#endif /* !CONFIG_DEBUG_VM */
209
bab4c8de
ME
210#ifdef CONFIG_PPC_RADIX_MMU
211static inline bool radix_enabled(void)
212{
213 return mmu_has_feature(MMU_FTR_TYPE_RADIX);
214}
a141cca3
ME
215
216static inline bool early_radix_enabled(void)
217{
218 return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
219}
bab4c8de
ME
220#else
/* Radix support compiled out: never enabled. */
static inline bool radix_enabled(void)
{
	return false;
}
a141cca3
ME
225
/* Radix support compiled out: never enabled, even during early boot. */
static inline bool early_radix_enabled(void)
{
	return false;
}
bab4c8de
ME
230#endif
231
7c03d653
BH
232#endif /* !__ASSEMBLY__ */
233
57e2a99f
BH
234/* The kernel use the constants below to index in the page sizes array.
235 * The use of fixed constants for this purpose is better for performances
236 * of the low level hash refill handlers.
237 *
238 * A non supported page size has a "shift" field set to 0
239 *
240 * Any new page size being implemented can get a new entry in here. Whether
241 * the kernel will use it or not is a different matter though. The actual page
242 * size used by hugetlbfs is not defined here and may be made variable
243 *
244 * Note: This array ended up being a false good idea as it's growing to the
245 * point where I wonder if we should replace it with something different,
246 * to think about, feedback welcome. --BenH.
247 */
248
a8b91e43 249/* These are #defines as they have to be used in assembly */
57e2a99f
BH
250#define MMU_PAGE_4K 0
251#define MMU_PAGE_16K 1
252#define MMU_PAGE_64K 2
253#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
254#define MMU_PAGE_256K 4
255#define MMU_PAGE_1M 5
28efc35f
SW
256#define MMU_PAGE_2M 6
257#define MMU_PAGE_4M 7
258#define MMU_PAGE_8M 8
259#define MMU_PAGE_16M 9
260#define MMU_PAGE_64M 10
261#define MMU_PAGE_256M 11
262#define MMU_PAGE_1G 12
263#define MMU_PAGE_16G 13
264#define MMU_PAGE_64G 14
265
266#define MMU_PAGE_COUNT 15
7c03d653 267
11a6f6ab
AK
268#ifdef CONFIG_PPC_BOOK3S_64
269#include <asm/book3s/64/mmu.h>
270#else /* CONFIG_PPC_BOOK3S_64 */
271
756d08d1
AK
272#ifndef __ASSEMBLY__
273/* MMU initialization */
274extern void early_init_mmu(void);
275extern void early_init_mmu_secondary(void);
276extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
277 phys_addr_t first_memblock_size);
1a01dc87 278static inline void mmu_early_init_devtree(void) { }
756d08d1 279#endif /* __ASSEMBLY__ */
11a6f6ab
AK
280#endif
281
282#if defined(CONFIG_PPC_STD_MMU_32)
4db68bfe 283/* 32-bit classic hash table MMU */
f64e8084 284#include <asm/book3s/32/mmu-hash.h>
4d922c8d
JB
285#elif defined(CONFIG_40x)
286/* 40x-style software loaded TLB */
287# include <asm/mmu-40x.h>
57d7909e
DG
288#elif defined(CONFIG_44x)
289/* 44x-style software loaded TLB */
290# include <asm/mmu-44x.h>
70fe3af8
KG
291#elif defined(CONFIG_PPC_BOOK3E_MMU)
292/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
293# include <asm/mmu-book3e.h>
31202345
DG
294#elif defined (CONFIG_PPC_8xx)
295/* Motorola/Freescale 8xx software loaded TLB */
296# include <asm/mmu-8xx.h>
1f8d419e 297#endif
1f8d419e 298
88ced031 299#endif /* __KERNEL__ */
047ea784 300#endif /* _ASM_POWERPC_MMU_H_ */