// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_lock
 *	    context.lock
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

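/*
 * Kernel virtual address of the PTI alias mapping for LDT slot @slot
 * (slot 0 or 1) within the LDT remap area starting at LDT_BASE_ADDR.
 */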
static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

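/*
 * Install the LDT of @mm on the current CPU, or clear LDTR if the mm has
 * no LDT.  With PTI enabled the LDT is loaded through its alias slot.
 */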
void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
}

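/* Called on context switch from switch_mm_irqs_off(). */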
void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

	DEBUG_LOCKS_WARN_ON(preemptible());
}

static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

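/*
 * Warn if the kernel and user page-table entries covering the LDT area
 * are not in the expected state for this mm: they should be populated
 * only if an LDT already exists (the user side is only checked when PTI
 * is enabled).
 */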
static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

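/* Walk pgd -> p4d -> pud and return the PMD for @va, or NULL if a level is not populated. */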
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

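/*
 * Tear down the PTI alias mapping of @ldt: clear its PTEs in @mm and flush
 * the TLB for the covered range. No-op when PTI is disabled.
 */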
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

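/* Free the page tables backing the (PTI-only) LDT remap area of @mm. */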
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

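/*
 * Publish @ldt as the mm's LDT and IPI all CPUs running this mm so they
 * reload it immediately.
 */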
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

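/*
 * modify_ldt() func 0: copy the current mm's LDT to userspace,
 * zero-filling the tail up to bytecount.
 */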
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

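/*
 * modify_ldt() funcs 1 and 0x11: validate the user_desc, build a new
 * (possibly larger) ldt_struct containing the updated entry, and install
 * it in place of the old one.
 */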
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

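/*
 * The modify_ldt(2) entry point. There is no glibc wrapper; userspace
 * invokes it as syscall(SYS_modify_ldt, func, ptr, bytecount).
 */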
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}