/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded there.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

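	/*
	 * PAE top-level (PDPT) entries do not take the usual access
	 * flags, so only the present bit is set here; the real flags
	 * live at the pmd and pte levels.
	 */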
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

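	/* The page table already exists, so just return it. */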
	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

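		/*
		 * All of lowmem has been mapped; keep going only to set
		 * up the pmd tables for the remaining pgd entries.
		 */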
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
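				/*
				 * A single large page covers PTRS_PER_PTE
				 * small pages: 4 MB without PAE, 2 MB with it.
				 */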
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

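/*
 * Map the page containing the image kernel's entry point at
 * restore_jump_address, so that execution can continue there once the
 * temporary page tables have been activated.
 */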
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}

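/*
 * Build the temporary page tables on resume-safe pages, relocate the
 * restore code, and then start restoring the hibernation image.
 */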
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

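	/*
	 * Pass the physical address of the temporary page tables to the
	 * assembly restore code.
	 */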
	temp_pgt = __pa(resume_pg_dir);

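	/*
	 * Move the restore code to a page that the image restore is
	 * guaranteed not to overwrite.
	 */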
	error = relocate_restore_code();
	if (error)
		return error;

	/* We have enough memory and from now on we cannot recover */
	restore_image();
	return 0;
}