/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
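
/*
 * Illustrative example (editorial addition, not used by any code): in the
 * folded, non-PAE layout a 32-bit virtual address splits into a 10-bit
 * page directory index, a 10-bit page table index and a 12-bit page offset:
 *
 *	pgd index = vaddr >> 22;		(== pgd_index(vaddr))
 *	pte index = (vaddr >> 12) & 0x3ff;	(== pte_index(vaddr))
 *	offset    = vaddr & 0xfff;
 *
 * With CONFIG_X86_PAE the split becomes 2 + 9 + 9 + 12 bits and the mid
 * (pmd) level physically exists.
 */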
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

void paging_init(void);
void sync_initial_page_table(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(..)' check.
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif

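/*
 * Usage sketch (editorial addition, illustrative only; the function name
 * is made up): the usual walk down to a PTE with the generic page table
 * accessors. The p4d/pud levels are folded on 32-bit. With CONFIG_HIGHPTE
 * the PTE page may live in highmem, so pte_offset_map() returns a
 * temporary kmap_atomic() mapping that must be dropped with pte_unmap().
 */
#if 0	/* example only */
static int example_pte_present(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded level */
	pud_t *pud = pud_offset(p4d, addr);	/* folded level */
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte;
	int ret;

	pte = pte_offset_map(pmd, addr);	/* kmap_atomic() under HIGHPTE */
	ret = pte_present(*pte);
	pte_unmap(pte);				/* kunmap_atomic() under HIGHPTE */
	return ret;
}
#endif
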
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one_kernel((vaddr));	\
} while (0)

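/*
 * Usage sketch (editorial addition, illustrative only; names below are
 * hypothetical): this is the pattern used when a temporary kernel mapping
 * such as a kmap_atomic() fixmap slot is torn down - clear the PTE, then
 * invalidate that single kernel TLB entry.
 */
#if 0	/* example only */
static void example_clear_fixmap(pte_t *kmap_pte, unsigned long vaddr)
{
	kpte_clear_flush(kmap_pte, vaddr);
}
#endif
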
#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for SPARSEMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise some page table entries will be wasted.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

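/*
 * Worked example (editorial addition, assuming the usual 512 MiB
 * KERNEL_IMAGE_SIZE on 32-bit): that is 131072 4K pages, so
 * PAGE_TABLE_SIZE() comes to 131072 / 1024 = 128 page-table pages
 * without PAE, or 131072 / 512 + 4 = 260 pages with PAE.
 */
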
/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 left by 31 instead of 1 left by 32 in order to avoid a gas
 * warning about an overflowing shift count when gas has been built with
 * only host-target support using a 32-bit type for its internal
 * representation.
 */
#define LOWMEM_PAGES ((((_ULL(2)<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
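
/*
 * Worked example (editorial addition, assuming the default __PAGE_OFFSET
 * of 0xC0000000): (2ULL << 31) - 0xC0000000 = 0x40000000, i.e. 1 GiB of
 * lowmem address space, which is 0x40000000 >> 12 = 262144 possible pages.
 */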

#endif /* _ASM_X86_PGTABLE_32_H */