// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(.data);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at
	 * most) one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

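/*
 * Note: the in-place conversion above relies on mapping the same physical
 * range twice with opposite C-bit settings. Reads through the source
 * mapping return the data in its current format, while writes through the
 * destination mapping store it in the desired format.
 */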
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

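/*
 * Illustrative sketch (hypothetical caller, not code from this file): a
 * page of data handed over by the boot loader in the clear could be
 * brought into the encrypted state in place with
 *
 *	sme_early_encrypt(__pa(boot_blob), PAGE_SIZE);
 *
 * where boot_blob names the (hypothetical) page-aligned data.
 */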
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	__native_flush_tlb();
}

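/*
 * The boot command line address is split across two boot_params fields:
 * hdr.cmd_line_ptr carries the low 32 bits and ext_cmd_line_ptr the high
 * 32 bits, so the helpers below combine them before mapping or unmapping
 * the command line area.
 */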
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

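	/*
	 * With SEV, devices cannot DMA into encrypted guest memory, so
	 * force all DMA through the (decrypted) SWIOTLB bounce buffers.
	 */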
	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}

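/*
 * _PAGE_ENC is the page-table representation of the SME/SEV C-bit (it is
 * derived from sme_me_mask). Setting or clearing it in a mapping changes
 * whether hardware transparently encrypts data accessed through that
 * mapping.
 */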
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protection is unchanged, do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << page_level_shift(level);
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear the encryption bit is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, so create
		 * the next-level page table mapping (4K or 2M). If it is
		 * part of a 2M page then we request a split of the large
		 * page into 4K chunks. A 1GB large page is split into 2M
		 * pages instead.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs,
		 * so a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}

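/*
 * Example (illustrative): changing the encryption attribute of a 16K range
 * that sits unaligned within a 2M mapping cannot be done on the whole large
 * page. The loop above requests a split into 4K pages and then re-walks the
 * same address, flipping only the PTEs that cover the requested range.
 */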
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

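/*
 * Illustrative sketch (hypothetical caller): an SEV guest that must share a
 * data structure with the hypervisor could clear its encryption attribute
 * early in boot, e.g.
 *
 *	ret = early_set_memory_decrypted((unsigned long)&hv_shared_data,
 *					 sizeof(hv_shared_data));
 *
 * where hv_shared_data names the (hypothetical) structure to be shared.
 */
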
/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example of this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}
EXPORT_SYMBOL(sme_active);

bool sev_active(void)
{
	return sme_me_mask && sev_enabled;
}
EXPORT_SYMBOL(sev_active);

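/*
 * Illustrative sketch (not code from this file): the trampoline setup
 * described above might use the distinction like this:
 *
 *	if (sme_active())
 *		set_memory_decrypted(trampoline_vaddr, trampoline_npages);
 *
 * i.e. under SME the trampoline area is made decrypted for pre-paging
 * execution, while under SEV it stays encrypted. Code that only needs to
 * know whether any memory encryption is active can test
 * mem_encrypt_active() instead.
 */
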
/* Architecture __weak replacement functions */
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted; change the
	 * encryption attribute from decrypted to encrypted before
	 * freeing it.
	 */
	if (mem_encrypt_active()) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions.
	 */
	if (sev_active())
		static_branch_enable(&sev_enable_key);

	pr_info("AMD %s active\n",
		sev_active() ? "Secure Encrypted Virtualization (SEV)"
			     : "Secure Memory Encryption (SME)");
}