/*
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *            Ulrich Weigand (weigand@de.ibm.com)
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);
enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
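/*
 * Illustrative sketch, not part of this header's API: a caller that adds
 * one 4K page to the kernel direct mapping would account for it roughly
 * as below. The real callers live in arch/s390/mm; the helper name is
 * hypothetical.
 */
#if 0
static void example_account_direct_4k(void)
{
	update_page_count(PG_DIRECT_MAP_4K, 1);	/* one more 4K mapping */
}
#endif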
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
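/*
 * Illustrative sketch, not part of this header's API: with
 * __HAVE_COLOR_ZERO_PAGE, ZERO_PAGE(vaddr) selects one of several zero
 * pages so differently "colored" user addresses hit differently colored
 * zero frames. The helper below just repeats the macro expansion.
 */
#if 0
static struct page *example_zero_page(unsigned long vaddr)
{
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}
#endif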
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT 20
#define PUD_SHIFT 31
#define PGDIR_SHIFT 42

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE 256
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
#define PTRS_PER_PGD 2048

#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                           |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * C Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC 0x100 /* HW no-execute bit */
#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* SW pte present bit */
#define _PAGE_YOUNG 0x004 /* SW pte young bit */
#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
#define _PAGE_READ 0x010 /* SW pte read bit */
#define _PAGE_WRITE 0x020 /* SW pte write bit */
#define _PAGE_SPECIAL 0x040 /* SW associated with special page */
#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
			_PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL /* segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
#define _ASCE_REAL_SPACE 0x20 /* real space control */
#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
#define _REGION_ENTRY_LENGTH 0x03 /* region third length */

#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_ORIGIN ~0x7ffUL /* region third table origin */

#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif
#define _REGION_ENTRY_BITS 0xfffffffffffff227UL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL /* segment table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */

#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */
#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
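/*
 * Worked example, illustration only: the row "read-write, dirty, young
 * 11..0...0...11" above is a segment entry with the SW dirty, young,
 * read and write bits set and both HW bits (protect, invalid) clear.
 * The helper name is hypothetical.
 */
#if 0
static pmd_t example_rw_dirty_young(unsigned long origin)
{
	return __pmd(origin | _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		     _SEGMENT_ENTRY_READ | _SEGMENT_ENTRY_WRITE);
}
#endif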
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS 0xf000000000000000UL
#define PGSTE_FP_BIT 0x0800000000000000UL
#define PGSTE_PCL_BIT 0x0080000000000000UL
#define PGSTE_HR_BIT 0x0040000000000000UL
#define PGSTE_HC_BIT 0x0020000000000000UL
#define PGSTE_GR_BIT 0x0004000000000000UL
#define PGSTE_GC_BIT 0x0002000000000000UL
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
			 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			  _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			     _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			     _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				_PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_RO
#define __P010 PAGE_RO
#define __P011 PAGE_RO
#define __P100 PAGE_RX
#define __P101 PAGE_RX
#define __P110 PAGE_RX
#define __P111 PAGE_RX

#define __S000 PAGE_NONE
#define __S001 PAGE_RO
#define __S010 PAGE_RW
#define __S011 PAGE_RW
#define __S100 PAGE_RX
#define __S101 PAGE_RX
#define __S110 PAGE_RWX
#define __S111 PAGE_RWX
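/*
 * Illustration only: generic mm code indexes these tables with the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits, roughly like the
 * protection_map in mm/mmap.c. Note that __P010 is PAGE_RO on purpose:
 * a private writable mapping starts out read-only so the first write
 * faults and copy-on-write can run. The helper below is hypothetical.
 */
#if 0
static pgprot_t example_vm_get_page_prot(unsigned long vm_flags)
{
	static const pgprot_t prot_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};
	return prot_map[vm_flags & 0x0f];
}
#endif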
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
			      _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \
			    _SEGMENT_ENTRY_READ | \
			    _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \
			    _SEGMENT_ENTRY_READ)
#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \
			    _SEGMENT_ENTRY_WRITE | \
			    _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \
			     _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
				_SEGMENT_ENTRY_LARGE | \
				_SEGMENT_ENTRY_READ | \
				_SEGMENT_ENTRY_WRITE | \
				_SEGMENT_ENTRY_YOUNG | \
				_SEGMENT_ENTRY_DIRTY | \
				_SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				   _SEGMENT_ENTRY_LARGE | \
				   _SEGMENT_ENTRY_READ | \
				   _SEGMENT_ENTRY_YOUNG | \
				   _SEGMENT_ENTRY_PROTECT | \
				   _SEGMENT_ENTRY_NOEXEC)
/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \
				_REGION3_ENTRY_LARGE | \
				_REGION3_ENTRY_READ | \
				_REGION3_ENTRY_WRITE | \
				_REGION3_ENTRY_YOUNG | \
				_REGION3_ENTRY_DIRTY | \
				_REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}
#define CRDTE_DTT_PAGE 0x00UL
#define CRDTE_DTT_SEGMENT 0x10UL
#define CRDTE_DTT_REGION3 0x14UL
#define CRDTE_DTT_REGION2 0x18UL
#define CRDTE_DTT_REGION1 0x1cUL
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
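/*
 * Illustrative sketch, assumptions loudly stated: the machine has the
 * compare-and-replace DAT table entry facility, mm->context.asce holds
 * the address space control element, and the helper name is invented.
 * It shows how a segment table entry could be exchanged via crdte.
 */
#if 0
static void example_crdte_pmd(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t new)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(addr) * sizeof(pmd_t);

	crdte(pmd_val(*pmdp), pmd_val(new), sto, CRDTE_DTT_SEGMENT,
	      addr, mm->context.asce);
}
#endif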
/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION3_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}
static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
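/*
 * Illustration only: an mprotect-style transition keeps the page frame
 * and the dirty/young state while swapping the protection, and lets
 * pte_modify recompute the HW invalid and protect bits from the SW
 * state. The helper name is hypothetical.
 */
#if 0
static pte_t example_make_readonly(pte_t old)
{
	return pte_modify(old, PAGE_RO);
}
#endif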
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL 0
#define IPTE_LOCAL 1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local) : "memory");
}
static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
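/*
 * Illustration only of the common-code sequence described above; on
 * s390 the TLB flush already happened inside ptep_get_and_clear, so
 * the final flush_tlb_range is a nop. The helper name is hypothetical.
 */
#if 0
static void example_change_pte_step(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);	/* 1) */
	set_pte_at(vma->vm_mm, addr, ptep, pte_wrprotect(pte));	/* 2) */
	/* 3) flush_tlb_range(vma, ...) - a nop on s390 */
}
#endif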
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
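/*
 * Illustration only, no locking shown: a software walk from the pgd
 * down to a pte with the offset helpers above, stopping at large
 * puds/pmds where no pte level exists. The helper name is hypothetical.
 */
#if 0
static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_large(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
#endif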
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	} else {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	}
	return pmd;
}
static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}
#define IDTE_GLOBAL 0
#define IDTE_LOCAL 1

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc");
}
static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
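/*
 * Illustration only: building and installing a huge pmd for a 1 MB
 * segment; has_transparent_hugepage() gates this on the EDAT1 facility.
 * The helper name and the choice of PAGE_RW are assumptions made for
 * the example, not part of this header's API.
 */
#if 0
static void example_install_huge_pmd(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, struct page *head)
{
	if (!has_transparent_hugepage())
		return;
	set_pmd_at(mm, addr, pmdp, pmd_mkhuge(mk_pmd(head, PAGE_RW)));
}
#endif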
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */
#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT 12
#define __SWP_TYPE_MASK ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT 2
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
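/*
 * Illustration only: round-tripping a swap entry through a pte; the
 * bit pattern (pte & 0x201) == 0x200 marks it as a swap pte. The
 * helper name and the example type/offset values are arbitrary.
 */
#if 0
static void example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */
	pte_t pte = __swp_entry_to_pte(entry);

	BUG_ON(!pte_swap(pte));
	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
}
#endif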
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */