// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	bool is_vmalloc, had_top_level_entry;
	unsigned long va;
	spinlock_t *ptl;
	pgd_t *pgd;
	int i;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/*
	 * Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);
	had_top_level_entry = (pgd->pgd != 0);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
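		/*
		 * _PAGE_GLOBAL is cleared because this alias differs per mm:
		 * a global TLB entry could survive a CR3 switch and leak one
		 * mm's LDT mapping into another.
		 */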
		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL));
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_top_level_entry);
		if (static_cpu_has(X86_FEATURE_PTI)) {
			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
		}
	}

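	/*
	 * Flush the slot's VA range: since freed LDTs are never unmapped,
	 * reusing a slot could otherwise hit stale translations.
	 */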
	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);

	ldt->slot = slot;
#endif
	return 0;
}

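/*
 * Tear down the pagetable pages backing the PTI LDT slot region (one PGD
 * worth of address space). This frees only the paging structures; the LDT
 * entries themselves are released by free_ldt_struct().
 */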
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = start + (1UL << PGDIR_SHIFT);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with the READ_ONCE in load_mm_ldt(). */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

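	/*
	 * The child mm has no PTI LDT alias yet, so slot 0 is necessarily
	 * free; write_ldt() alternates slots on later updates.
	 */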
	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

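/*
 * write_ldt() serves both the historic interface (func 1, "oldmode") and
 * the current one (func 0x11): as the checks below show, oldmode rejects
 * contents == 3 outright and forces the AVL bit to zero.
 */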
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * Free the new LDT, not the still-installed old one, and mop
		 * up a half-populated page table. This can only fail for the
		 * first LDT setup; if an LDT is already installed then the
		 * PTE page is already populated.
		 */
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

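/*
 * Userspace usage sketch (illustrative, not part of this file): glibc has
 * no modify_ldt() wrapper, so callers typically go through syscall(2),
 * e.g. to install a 32-bit data segment at LDT entry 0 covering a
 * hypothetical buffer 'buf':
 *
 *	struct user_desc d = {
 *		.entry_number = 0,
 *		.base_addr    = (unsigned long)buf,
 *		.limit        = 0xfff,
 *		.seg_32bit    = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));	// func 0x11: write
 *
 * The descriptor is then addressable through a selector with the TI bit
 * set: (entry_number << 3) | 4 | 3 for RPL 3.
 */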
SYSCALL_DEFINE3(modify_ldt, int, func, void __user *, ptr,
		unsigned long, bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}