// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <linux/pagewalk.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
static bool split_underused_thp = true;
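/*
 * Editor's note: the two shrinkers declared above are wired up in
 * thp_shrinker_init() below. One can hand the huge zero folio back under
 * memory pressure, the other walks the deferred-split queues and splits
 * partially-unmapped THPs so their unused subpages can be reclaimed.
 */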

static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
static bool anon_orders_configured __initdata;

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return false;

	if (!vma->vm_file)
		return false;

	inode = file_inode(vma->vm_file);

	return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders)
{
	bool smaps = tva_flags & TVA_SMAPS;
	bool in_pf = tva_flags & TVA_IN_PF;
	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
	unsigned long supported_orders;

	/* Check the intersection of requested and supported orders. */
	if (vma_is_anonymous(vma))
		supported_orders = THP_ORDERS_ALL_ANON;
	else if (vma_is_special_huge(vma))
		supported_orders = THP_ORDERS_ALL_SPECIAL;
	else
		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;

	orders &= supported_orders;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
						   vma, vma->vm_pgoff, 0,
						   !enforce_sysfs);

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if ((in_pf || smaps) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if ((!in_pf || smaps) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

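/*
 * Editor's note on the "orders" convention used above: the caller passes a
 * bitmask in which bit N selects order N. For example, with a 4K base page
 * a caller interested in 64K and PMD-sized mappings would pass
 * BIT(4) | BIT(PMD_ORDER); the function clears the bits this VMA cannot
 * satisfy, so a zero return means no THP order is allowed here.
 */
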
static bool get_huge_zero_page(void)
{
	struct folio *zero_folio;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_folio) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	/* Ensure zero folio won't have large_rmappable flag set. */
	folio_clear_large_rmappable(zero_folio);
	preempt_disable();
	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
		preempt_enable();
		folio_put(zero_folio);
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_folio);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_folio);
}

void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
		BUG_ON(zero_folio == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		folio_put(zero_folio);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

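/*
 * Editor's note on the huge zero folio lifetime: the first mm that needs it
 * allocates it via get_huge_zero_page(), leaving the refcount at 2 (one for
 * that mm, one "floor" reference owned by the shrinker). Each further mm
 * takes one more reference; once only the shrinker's reference remains,
 * shrink_huge_zero_page_scan() drops it under memory pressure and the folio
 * is freed.
 */
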
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

b46e756f 338ssize_t single_hugepage_flag_show(struct kobject *kobj,
bfb0ffeb
JP
339 struct kobj_attribute *attr, char *buf,
340 enum transparent_hugepage_flag flag)
71e3aac0 341{
bfb0ffeb
JP
342 return sysfs_emit(buf, "%d\n",
343 !!test_bit(flag, &transparent_hugepage_flags));
71e3aac0 344}
e27e6151 345
b46e756f 346ssize_t single_hugepage_flag_store(struct kobject *kobj,
71e3aac0
AA
347 struct kobj_attribute *attr,
348 const char *buf, size_t count,
349 enum transparent_hugepage_flag flag)
350{
e27e6151
BH
351 unsigned long value;
352 int ret;
353
354 ret = kstrtoul(buf, 10, &value);
355 if (ret < 0)
356 return ret;
357 if (value > 1)
358 return -EINVAL;
359
360 if (value)
71e3aac0 361 set_bit(flag, &transparent_hugepage_flags);
e27e6151 362 else
71e3aac0 363 clear_bit(flag, &transparent_hugepage_flags);
71e3aac0
AA
364
365 return count;
366}
367
71e3aac0
AA
368static ssize_t defrag_show(struct kobject *kobj,
369 struct kobj_attribute *attr, char *buf)
370{
bfb0ffeb
JP
371 const char *output;
372
373 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
374 &transparent_hugepage_flags))
375 output = "[always] defer defer+madvise madvise never";
376 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
377 &transparent_hugepage_flags))
378 output = "always [defer] defer+madvise madvise never";
379 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
380 &transparent_hugepage_flags))
381 output = "always defer [defer+madvise] madvise never";
382 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
383 &transparent_hugepage_flags))
384 output = "always defer defer+madvise [madvise] never";
385 else
386 output = "always defer defer+madvise madvise [never]";
387
388 return sysfs_emit(buf, "%s\n", output);
71e3aac0 389}
21440d7e 390
71e3aac0
AA
391static ssize_t defrag_store(struct kobject *kobj,
392 struct kobj_attribute *attr,
393 const char *buf, size_t count)
394{
f42f2552 395 if (sysfs_streq(buf, "always")) {
21440d7e
DR
396 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
397 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
398 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
399 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
f42f2552 400 } else if (sysfs_streq(buf, "defer+madvise")) {
21440d7e
DR
401 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
402 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
403 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
404 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
f42f2552 405 } else if (sysfs_streq(buf, "defer")) {
4fad7fb6
DR
406 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
407 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
408 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
409 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
f42f2552 410 } else if (sysfs_streq(buf, "madvise")) {
21440d7e
DR
411 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
412 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
413 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
414 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
f42f2552 415 } else if (sysfs_streq(buf, "never")) {
21440d7e
DR
416 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
417 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
418 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
419 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
420 } else
421 return -EINVAL;
422
423 return count;
71e3aac0 424}
37139bb0 425static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
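/*
 * Example (editor's note): an administrator who wants madvise(MADV_HUGEPAGE)
 * regions to compact synchronously while all other THP allocations merely
 * wake kcompactd can write "defer+madvise" to
 * /sys/kernel/mm/transparent_hugepage/defrag.
 */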
71e3aac0 426
79da5407 427static ssize_t use_zero_page_show(struct kobject *kobj,
ae7a927d 428 struct kobj_attribute *attr, char *buf)
79da5407 429{
b46e756f 430 return single_hugepage_flag_show(kobj, attr, buf,
ae7a927d 431 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
79da5407
KS
432}
433static ssize_t use_zero_page_store(struct kobject *kobj,
434 struct kobj_attribute *attr, const char *buf, size_t count)
435{
b46e756f 436 return single_hugepage_flag_store(kobj, attr, buf, count,
79da5407
KS
437 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
438}
37139bb0 439static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
49920d28
HD
440
441static ssize_t hpage_pmd_size_show(struct kobject *kobj,
ae7a927d 442 struct kobj_attribute *attr, char *buf)
49920d28 443{
ae7a927d 444 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
49920d28
HD
445}
446static struct kobj_attribute hpage_pmd_size_attr =
447 __ATTR_RO(hpage_pmd_size);
448
81d3ff3c
UA
449static ssize_t split_underused_thp_show(struct kobject *kobj,
450 struct kobj_attribute *attr, char *buf)
451{
452 return sysfs_emit(buf, "%d\n", split_underused_thp);
453}
454
455static ssize_t split_underused_thp_store(struct kobject *kobj,
456 struct kobj_attribute *attr,
457 const char *buf, size_t count)
458{
459 int err = kstrtobool(buf, &split_underused_thp);
460
461 if (err < 0)
462 return err;
463
464 return count;
465}
466
467static struct kobj_attribute split_underused_thp_attr = __ATTR(
468 shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
469
71e3aac0
AA
470static struct attribute *hugepage_attr[] = {
471 &enabled_attr.attr,
472 &defrag_attr.attr,
79da5407 473 &use_zero_page_attr.attr,
49920d28 474 &hpage_pmd_size_attr.attr,
396bcc52 475#ifdef CONFIG_SHMEM
5a6e75f8 476 &shmem_enabled_attr.attr,
71e3aac0 477#endif
81d3ff3c 478 &split_underused_thp_attr.attr,
71e3aac0
AA
479 NULL,
480};
481
8aa95a21 482static const struct attribute_group hugepage_attr_group = {
71e3aac0 483 .attrs = hugepage_attr,
ba76149f
AA
484};
485
3485b883
RR
486static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
487static void thpsize_release(struct kobject *kobj);
488static DEFINE_SPINLOCK(huge_anon_orders_lock);
489static LIST_HEAD(thpsize_list);
490
70e59a75
RR
491static ssize_t anon_enabled_show(struct kobject *kobj,
492 struct kobj_attribute *attr, char *buf)
3485b883
RR
493{
494 int order = to_thpsize(kobj)->order;
495 const char *output;
496
497 if (test_bit(order, &huge_anon_orders_always))
498 output = "[always] inherit madvise never";
499 else if (test_bit(order, &huge_anon_orders_inherit))
500 output = "always [inherit] madvise never";
501 else if (test_bit(order, &huge_anon_orders_madvise))
502 output = "always inherit [madvise] never";
503 else
504 output = "always inherit madvise [never]";
505
506 return sysfs_emit(buf, "%s\n", output);
507}
508
70e59a75
RR
509static ssize_t anon_enabled_store(struct kobject *kobj,
510 struct kobj_attribute *attr,
511 const char *buf, size_t count)
3485b883
RR
512{
513 int order = to_thpsize(kobj)->order;
514 ssize_t ret = count;
515
516 if (sysfs_streq(buf, "always")) {
517 spin_lock(&huge_anon_orders_lock);
518 clear_bit(order, &huge_anon_orders_inherit);
519 clear_bit(order, &huge_anon_orders_madvise);
520 set_bit(order, &huge_anon_orders_always);
521 spin_unlock(&huge_anon_orders_lock);
522 } else if (sysfs_streq(buf, "inherit")) {
523 spin_lock(&huge_anon_orders_lock);
524 clear_bit(order, &huge_anon_orders_always);
525 clear_bit(order, &huge_anon_orders_madvise);
526 set_bit(order, &huge_anon_orders_inherit);
527 spin_unlock(&huge_anon_orders_lock);
528 } else if (sysfs_streq(buf, "madvise")) {
529 spin_lock(&huge_anon_orders_lock);
530 clear_bit(order, &huge_anon_orders_always);
531 clear_bit(order, &huge_anon_orders_inherit);
532 set_bit(order, &huge_anon_orders_madvise);
533 spin_unlock(&huge_anon_orders_lock);
534 } else if (sysfs_streq(buf, "never")) {
535 spin_lock(&huge_anon_orders_lock);
536 clear_bit(order, &huge_anon_orders_always);
537 clear_bit(order, &huge_anon_orders_inherit);
538 clear_bit(order, &huge_anon_orders_madvise);
539 spin_unlock(&huge_anon_orders_lock);
540 } else
541 ret = -EINVAL;
542
00f58104
RR
543 if (ret > 0) {
544 int err;
545
546 err = start_stop_khugepaged();
547 if (err)
548 ret = err;
549 }
3485b883
RR
550 return ret;
551}
552
70e59a75
RR
553static struct kobj_attribute anon_enabled_attr =
554 __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
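/*
 * Editor's note: each supported anonymous order gets its own directory, e.g.
 * /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled, created by
 * thpsize_create() below. "inherit" makes that size track the top-level
 * "enabled" knob; by default only the PMD size is set to inherit.
 */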
3485b883 555
70e59a75
RR
556static struct attribute *anon_ctrl_attrs[] = {
557 &anon_enabled_attr.attr,
558 NULL,
559};
560
561static const struct attribute_group anon_ctrl_attr_grp = {
562 .attrs = anon_ctrl_attrs,
563};
3485b883 564
70e59a75 565static struct attribute *file_ctrl_attrs[] = {
4b989955
BW
566#ifdef CONFIG_SHMEM
567 &thpsize_shmem_enabled_attr.attr,
568#endif
3485b883
RR
569 NULL,
570};
571
70e59a75
RR
572static const struct attribute_group file_ctrl_attr_grp = {
573 .attrs = file_ctrl_attrs,
574};
575
576static struct attribute *any_ctrl_attrs[] = {
577 NULL,
578};
579
580static const struct attribute_group any_ctrl_attr_grp = {
581 .attrs = any_ctrl_attrs,
3485b883
RR
582};
583
584static const struct kobj_type thpsize_ktype = {
585 .release = &thpsize_release,
586 .sysfs_ops = &kobj_sysfs_ops,
587};
588
ec33687c
BS
589DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
590
591static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
592{
593 unsigned long sum = 0;
594 int cpu;
595
596 for_each_possible_cpu(cpu) {
597 struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
598
599 sum += this->stats[order][item];
600 }
601
602 return sum;
603}
604
605#define DEFINE_MTHP_STAT_ATTR(_name, _index) \
606static ssize_t _name##_show(struct kobject *kobj, \
607 struct kobj_attribute *attr, char *buf) \
608{ \
609 int order = to_thpsize(kobj)->order; \
610 \
611 return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \
612} \
613static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
614
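/*
 * Editor's note: mthp_stats above are plain per-CPU counters bumped without
 * locking on the fast paths; sum_mthp_stat() just adds the per-CPU values,
 * so a sysfs reader may observe a slightly stale total but never blocks an
 * allocation or fault.
 */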
615DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
616DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
617DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
0c560dd8 618DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
aaf2914a 619DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
67c8b11b
WH
620DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK);
621DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
0d648dd5
BW
622DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
623DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
70e59a75 624#ifdef CONFIG_SHMEM
63d9866a
RR
625DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
626DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
627DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
70e59a75 628#endif
f216c845
LY
629DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
630DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
631DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
5d65c8d7 632DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
8175ebfd 633DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
ec33687c 634
70e59a75 635static struct attribute *anon_stats_attrs[] = {
ec33687c
BS
636 &anon_fault_alloc_attr.attr,
637 &anon_fault_fallback_attr.attr,
638 &anon_fault_fallback_charge_attr.attr,
70e59a75 639#ifndef CONFIG_SHMEM
0c560dd8 640 &zswpout_attr.attr,
aaf2914a 641 &swpin_attr.attr,
67c8b11b
WH
642 &swpin_fallback_attr.attr,
643 &swpin_fallback_charge_attr.attr,
0d648dd5
BW
644 &swpout_attr.attr,
645 &swpout_fallback_attr.attr,
70e59a75
RR
646#endif
647 &split_deferred_attr.attr,
5d65c8d7 648 &nr_anon_attr.attr,
8175ebfd 649 &nr_anon_partially_mapped_attr.attr,
70e59a75
RR
650 NULL,
651};
652
653static struct attribute_group anon_stats_attr_grp = {
654 .name = "stats",
655 .attrs = anon_stats_attrs,
656};
657
658static struct attribute *file_stats_attrs[] = {
659#ifdef CONFIG_SHMEM
63d9866a
RR
660 &shmem_alloc_attr.attr,
661 &shmem_fallback_attr.attr,
662 &shmem_fallback_charge_attr.attr,
70e59a75
RR
663#endif
664 NULL,
665};
666
667static struct attribute_group file_stats_attr_grp = {
668 .name = "stats",
669 .attrs = file_stats_attrs,
670};
671
672static struct attribute *any_stats_attrs[] = {
673#ifdef CONFIG_SHMEM
0c560dd8 674 &zswpout_attr.attr,
aaf2914a 675 &swpin_attr.attr,
67c8b11b
WH
676 &swpin_fallback_attr.attr,
677 &swpin_fallback_charge_attr.attr,
70e59a75
RR
678 &swpout_attr.attr,
679 &swpout_fallback_attr.attr,
680#endif
f216c845
LY
681 &split_attr.attr,
682 &split_failed_attr.attr,
ec33687c
BS
683 NULL,
684};
685
70e59a75 686static struct attribute_group any_stats_attr_grp = {
ec33687c 687 .name = "stats",
70e59a75 688 .attrs = any_stats_attrs,
ec33687c
BS
689};
690
70e59a75
RR
691static int sysfs_add_group(struct kobject *kobj,
692 const struct attribute_group *grp)
693{
694 int ret = -ENOENT;
695
696 /*
697 * If the group is named, try to merge first, assuming the subdirectory
698 * was already created. This avoids the warning emitted by
699 * sysfs_create_group() if the directory already exists.
700 */
701 if (grp->name)
702 ret = sysfs_merge_group(kobj, grp);
703 if (ret)
704 ret = sysfs_create_group(kobj, grp);
705
706 return ret;
707}
708
3485b883
RR
709static struct thpsize *thpsize_create(int order, struct kobject *parent)
710{
711 unsigned long size = (PAGE_SIZE << order) / SZ_1K;
712 struct thpsize *thpsize;
70e59a75 713 int ret = -ENOMEM;
3485b883
RR
714
715 thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
716 if (!thpsize)
70e59a75
RR
717 goto err;
718
719 thpsize->order = order;
3485b883
RR
720
721 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
722 "hugepages-%lukB", size);
723 if (ret) {
724 kfree(thpsize);
70e59a75 725 goto err;
3485b883
RR
726 }
727
70e59a75
RR
728
729 ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
730 if (ret)
731 goto err_put;
732
733 ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
734 if (ret)
735 goto err_put;
736
737 if (BIT(order) & THP_ORDERS_ALL_ANON) {
738 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
739 if (ret)
740 goto err_put;
741
742 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
743 if (ret)
744 goto err_put;
3485b883
RR
745 }
746
70e59a75
RR
747 if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
748 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
749 if (ret)
750 goto err_put;
751
752 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
753 if (ret)
754 goto err_put;
ec33687c
BS
755 }
756
3485b883 757 return thpsize;
70e59a75
RR
758err_put:
759 kobject_put(&thpsize->kobj);
760err:
761 return ERR_PTR(ret);
3485b883
RR
762}
763
764static void thpsize_release(struct kobject *kobj)
765{
766 kfree(to_thpsize(kobj));
767}
768
569e5590 769static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
71e3aac0 770{
71e3aac0 771 int err;
3485b883
RR
772 struct thpsize *thpsize;
773 unsigned long orders;
774 int order;
775
776 /*
777 * Default to setting PMD-sized THP to inherit the global setting and
778 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
779 * constant so we have to do this here.
780 */
dd4d30d1
RR
781 if (!anon_orders_configured)
782 huge_anon_orders_inherit = BIT(PMD_ORDER);
71e3aac0 783
569e5590
SL
784 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
785 if (unlikely(!*hugepage_kobj)) {
ae3a8c1c 786 pr_err("failed to create transparent hugepage kobject\n");
569e5590 787 return -ENOMEM;
ba76149f
AA
788 }
789
569e5590 790 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
ba76149f 791 if (err) {
ae3a8c1c 792 pr_err("failed to register transparent hugepage group\n");
569e5590 793 goto delete_obj;
ba76149f
AA
794 }
795
569e5590 796 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
ba76149f 797 if (err) {
ae3a8c1c 798 pr_err("failed to register transparent hugepage group\n");
569e5590 799 goto remove_hp_group;
ba76149f 800 }
569e5590 801
70e59a75 802 orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
3485b883
RR
803 order = highest_order(orders);
804 while (orders) {
805 thpsize = thpsize_create(order, *hugepage_kobj);
806 if (IS_ERR(thpsize)) {
807 pr_err("failed to create thpsize for order %d\n", order);
808 err = PTR_ERR(thpsize);
809 goto remove_all;
810 }
811 list_add(&thpsize->node, &thpsize_list);
812 order = next_order(&orders, order);
813 }
814
569e5590
SL
815 return 0;
816
3485b883
RR
817remove_all:
818 hugepage_exit_sysfs(*hugepage_kobj);
819 return err;
569e5590
SL
820remove_hp_group:
821 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
822delete_obj:
823 kobject_put(*hugepage_kobj);
824 return err;
825}
826
827static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
828{
3485b883
RR
829 struct thpsize *thpsize, *tmp;
830
831 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
832 list_del(&thpsize->node);
833 kobject_put(&thpsize->kobj);
834 }
835
569e5590
SL
836 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
837 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
838 kobject_put(hugepage_kobj);
839}
840#else
841static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
842{
843 return 0;
844}
845
846static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
847{
848}
849#endif /* CONFIG_SYSFS */
850
54d91729
QZ
851static int __init thp_shrinker_init(void)
852{
853 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
854 if (!huge_zero_page_shrinker)
855 return -ENOMEM;
856
857 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
858 SHRINKER_MEMCG_AWARE |
859 SHRINKER_NONSLAB,
860 "thp-deferred_split");
861 if (!deferred_split_shrinker) {
862 shrinker_free(huge_zero_page_shrinker);
863 return -ENOMEM;
864 }
865
866 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
867 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
868 shrinker_register(huge_zero_page_shrinker);
869
870 deferred_split_shrinker->count_objects = deferred_split_count;
871 deferred_split_shrinker->scan_objects = deferred_split_scan;
872 shrinker_register(deferred_split_shrinker);
873
874 return 0;
875}
876
877static void __init thp_shrinker_exit(void)
878{
879 shrinker_free(huge_zero_page_shrinker);
880 shrinker_free(deferred_split_shrinker);
881}
882
569e5590
SL
883static int __init hugepage_init(void)
884{
885 int err;
886 struct kobject *hugepage_kobj;
887
888 if (!has_transparent_hugepage()) {
3c556d24 889 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
569e5590
SL
890 return -EINVAL;
891 }
892
ff20c2e0
KS
893 /*
894 * hugepages can't be allocated by the buddy allocator
895 */
5e0a760b 896 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
ff20c2e0 897
569e5590
SL
898 err = hugepage_init_sysfs(&hugepage_kobj);
899 if (err)
65ebb64f 900 goto err_sysfs;
ba76149f 901
b46e756f 902 err = khugepaged_init();
ba76149f 903 if (err)
65ebb64f 904 goto err_slab;
ba76149f 905
54d91729 906 err = thp_shrinker_init();
9a982250 907 if (err)
54d91729 908 goto err_shrinker;
97ae1749 909
97562cd2
RR
910 /*
911 * By default disable transparent hugepages on smaller systems,
912 * where the extra memory used could hurt more than TLB overhead
913 * is likely to save. The admin can still enable it through /sys.
914 */
ca79b0c2 915 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
97562cd2 916 transparent_hugepage_flags = 0;
79553da2
KS
917 return 0;
918 }
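	/*
	 * Editor's note: 512 << (20 - PAGE_SHIFT) is simply 512MB expressed
	 * in pages (131072 pages with a 4K page size), so the check above
	 * reads "machines with less than 512MB of RAM boot with THP disabled".
	 */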
97562cd2 919
79553da2 920 err = start_stop_khugepaged();
65ebb64f
KS
921 if (err)
922 goto err_khugepaged;
ba76149f 923
569e5590 924 return 0;
65ebb64f 925err_khugepaged:
54d91729
QZ
926 thp_shrinker_exit();
927err_shrinker:
b46e756f 928 khugepaged_destroy();
65ebb64f 929err_slab:
569e5590 930 hugepage_exit_sysfs(hugepage_kobj);
65ebb64f 931err_sysfs:
ba76149f 932 return err;
71e3aac0 933}
a64fb3cd 934subsys_initcall(hugepage_init);
71e3aac0
AA
935
936static int __init setup_transparent_hugepage(char *str)
937{
938 int ret = 0;
939 if (!str)
940 goto out;
941 if (!strcmp(str, "always")) {
942 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
943 &transparent_hugepage_flags);
944 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
945 &transparent_hugepage_flags);
946 ret = 1;
947 } else if (!strcmp(str, "madvise")) {
948 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
949 &transparent_hugepage_flags);
950 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
951 &transparent_hugepage_flags);
952 ret = 1;
953 } else if (!strcmp(str, "never")) {
954 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
955 &transparent_hugepage_flags);
956 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
957 &transparent_hugepage_flags);
958 ret = 1;
959 }
960out:
961 if (!ret)
ae3a8c1c 962 pr_warn("transparent_hugepage= cannot parse, ignored\n");
71e3aac0
AA
963 return ret;
964}
965__setup("transparent_hugepage=", setup_transparent_hugepage);
966
dd4d30d1
RR
967static char str_dup[PAGE_SIZE] __initdata;
968static int __init setup_thp_anon(char *str)
969{
970 char *token, *range, *policy, *subtoken;
971 unsigned long always, inherit, madvise;
972 char *start_size, *end_size;
973 int start, end, nr;
974 char *p;
975
976 if (!str || strlen(str) + 1 > PAGE_SIZE)
977 goto err;
93c1e57a 978 strscpy(str_dup, str);
dd4d30d1
RR
979
980 always = huge_anon_orders_always;
981 madvise = huge_anon_orders_madvise;
982 inherit = huge_anon_orders_inherit;
983 p = str_dup;
984 while ((token = strsep(&p, ";")) != NULL) {
985 range = strsep(&token, ":");
986 policy = token;
987
988 if (!policy)
989 goto err;
990
991 while ((subtoken = strsep(&range, ",")) != NULL) {
992 if (strchr(subtoken, '-')) {
993 start_size = strsep(&subtoken, "-");
994 end_size = subtoken;
995
1c8d4849
MC
996 start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON);
997 end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON);
dd4d30d1 998 } else {
1c8d4849
MC
999 start_size = end_size = subtoken;
1000 start = end = get_order_from_str(subtoken,
1001 THP_ORDERS_ALL_ANON);
1002 }
1003
1004 if (start == -EINVAL) {
1005 pr_err("invalid size %s in thp_anon boot parameter\n", start_size);
1006 goto err;
1007 }
1008
1009 if (end == -EINVAL) {
1010 pr_err("invalid size %s in thp_anon boot parameter\n", end_size);
1011 goto err;
dd4d30d1
RR
1012 }
1013
1014 if (start < 0 || end < 0 || start > end)
1015 goto err;
1016
1017 nr = end - start + 1;
1018 if (!strcmp(policy, "always")) {
1019 bitmap_set(&always, start, nr);
1020 bitmap_clear(&inherit, start, nr);
1021 bitmap_clear(&madvise, start, nr);
1022 } else if (!strcmp(policy, "madvise")) {
1023 bitmap_set(&madvise, start, nr);
1024 bitmap_clear(&inherit, start, nr);
1025 bitmap_clear(&always, start, nr);
1026 } else if (!strcmp(policy, "inherit")) {
1027 bitmap_set(&inherit, start, nr);
1028 bitmap_clear(&madvise, start, nr);
1029 bitmap_clear(&always, start, nr);
1030 } else if (!strcmp(policy, "never")) {
1031 bitmap_clear(&inherit, start, nr);
1032 bitmap_clear(&madvise, start, nr);
1033 bitmap_clear(&always, start, nr);
1034 } else {
1035 pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1036 goto err;
1037 }
1038 }
1039 }
1040
1041 huge_anon_orders_always = always;
1042 huge_anon_orders_madvise = madvise;
1043 huge_anon_orders_inherit = inherit;
1044 anon_orders_configured = true;
1045 return 1;
1046
1047err:
1048 pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1049 return 0;
1050}
1051__setup("thp_anon=", setup_thp_anon);
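/*
 * Example (editor's note, assuming a 4K base page size): booting with
 *	thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never
 * marks orders 2-4 always, lets 128K and 512K follow the global setting,
 * limits 256K to madvised ranges and disables 1M-2M entirely.
 */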
1052
f55e1014 1053pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
71e3aac0 1054{
f55e1014 1055 if (likely(vma->vm_flags & VM_WRITE))
161e393c 1056 pmd = pmd_mkwrite(pmd, vma);
71e3aac0
AA
1057 return pmd;
1058}
1059
87eaceb3 1060#ifdef CONFIG_MEMCG
f8baa6be
MWO
1061static inline
1062struct deferred_split *get_deferred_split_queue(struct folio *folio)
9a982250 1063{
f8baa6be
MWO
1064 struct mem_cgroup *memcg = folio_memcg(folio);
1065 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
87eaceb3
YS
1066
1067 if (memcg)
1068 return &memcg->deferred_split_queue;
1069 else
1070 return &pgdat->deferred_split_queue;
9a982250 1071}
87eaceb3 1072#else
f8baa6be
MWO
1073static inline
1074struct deferred_split *get_deferred_split_queue(struct folio *folio)
87eaceb3 1075{
f8baa6be 1076 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
87eaceb3
YS
1077
1078 return &pgdat->deferred_split_queue;
1079}
1080#endif
9a982250 1081
5beaee54 1082static inline bool is_transparent_hugepage(const struct folio *folio)
005ba37c 1083{
a644b0ab 1084 if (!folio_test_large(folio))
fa1f68cc 1085 return false;
005ba37c 1086
5beaee54 1087 return is_huge_zero_folio(folio) ||
de53c05f 1088 folio_test_large_rmappable(folio);
005ba37c 1089}
005ba37c 1090
97d3d0f9
KS
1091static unsigned long __thp_get_unmapped_area(struct file *filp,
1092 unsigned long addr, unsigned long len,
ed48e87c
RE
1093 loff_t off, unsigned long flags, unsigned long size,
1094 vm_flags_t vm_flags)
74d2fad1 1095{
74d2fad1
TK
1096 loff_t off_end = off + len;
1097 loff_t off_align = round_up(off, size);
96204e15 1098 unsigned long len_pad, ret, off_sub;
74d2fad1 1099
d9592025 1100 if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
4ef9ad19
YS
1101 return 0;
1102
74d2fad1
TK
1103 if (off_end <= off_align || (off_end - off_align) < size)
1104 return 0;
1105
1106 len_pad = len + size;
1107 if (len_pad < len || (off + len_pad) < off)
1108 return 0;
1109
ed48e87c
RE
1110 ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
1111 off >> PAGE_SHIFT, flags, vm_flags);
97d3d0f9
KS
1112
1113 /*
1114 * The failure might be due to length padding. The caller will retry
1115 * without the padding.
1116 */
1117 if (IS_ERR_VALUE(ret))
74d2fad1
TK
1118 return 0;
1119
97d3d0f9
KS
1120 /*
1121 * Do not try to align to THP boundary if allocation at the address
1122 * hint succeeds.
1123 */
1124 if (ret == addr)
1125 return addr;
1126
96204e15
RR
1127 off_sub = (off - ret) & (size - 1);
1128
529ce23a 1129 if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
96204e15
RR
1130 return ret + size;
1131
1132 ret += off_sub;
97d3d0f9 1133 return ret;
74d2fad1
TK
1134}
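/*
 * Worked example for the padding above (editor's note): len = 4M, off = 0,
 * and the padded search returns an address 1M past a 2M boundary. Then
 * off_sub = 1M, and adding it moves the start up to the next 2M boundary so
 * file offset 0 lands on a PMD-aligned address; the extra "size" bytes
 * requested via len_pad guarantee the shifted range still fits in the gap.
 */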
1135
ed48e87c
RE
1136unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1137 unsigned long len, unsigned long pgoff, unsigned long flags,
1138 vm_flags_t vm_flags)
74d2fad1 1139{
97d3d0f9 1140 unsigned long ret;
74d2fad1
TK
1141 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1142
ed48e87c 1143 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
97d3d0f9
KS
1144 if (ret)
1145 return ret;
1854bc6e 1146
ed48e87c
RE
1147 return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
1148 vm_flags);
1149}
1150
1151unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1152 unsigned long len, unsigned long pgoff, unsigned long flags)
1153{
1154 return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
74d2fad1
TK
1155}
1156EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
1157
ebcfc63d
DJ
1158static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
1159 unsigned long addr)
71e3aac0 1160{
ebcfc63d
DJ
1161 gfp_t gfp = vma_thp_gfp_mask(vma);
1162 const int order = HPAGE_PMD_ORDER;
1163 struct folio *folio;
71e3aac0 1164
6359c39c 1165 folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK);
00501b53 1166
ebcfc63d
DJ
1167 if (unlikely(!folio)) {
1168 count_vm_event(THP_FAULT_FALLBACK);
1169 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1170 return NULL;
1171 }
1172
1173 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
cfe3236d
KW
1174 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1175 folio_put(folio);
6b251fc9 1176 count_vm_event(THP_FAULT_FALLBACK);
85b9f46e 1177 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
ebcfc63d
DJ
1178 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
1179 count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1180 return NULL;
6b251fc9 1181 }
cfe3236d 1182 folio_throttle_swaprate(folio, gfp);
00501b53 1183
5708d96d 1184 /*
c51a4f11
ZY
1185 * When a folio is not zeroed during allocation (__GFP_ZERO not used)
1186 * or user folios require special handling, folio_zero_user() is used to
1187 * make sure that the page corresponding to the faulting address will be
1188 * hot in the cache after zeroing.
5708d96d 1189 */
c51a4f11 1190 if (user_alloc_needs_zeroing())
5708d96d 1191 folio_zero_user(folio, addr);
52f37629 1192 /*
cfe3236d 1193 * The memory barrier inside __folio_mark_uptodate makes sure that
78fefd04 1194 * folio_zero_user writes become visible before the set_pmd_at()
52f37629
MK
1195 * write.
1196 */
cfe3236d 1197 __folio_mark_uptodate(folio);
ebcfc63d
DJ
1198 return folio;
1199}
1200
1201static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
1202 struct vm_area_struct *vma, unsigned long haddr)
1203{
1204 pmd_t entry;
1205
e3981db4 1206 entry = folio_mk_pmd(folio, vma->vm_page_prot);
ebcfc63d
DJ
1207 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1208 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1209 folio_add_lru_vma(folio, vma);
1210 set_pmd_at(vma->vm_mm, haddr, pmd, entry);
1211 update_mmu_cache_pmd(vma, haddr, pmd);
1212 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1213 count_vm_event(THP_FAULT_ALLOC);
1214 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1215 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1216}
1217
1218static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1219{
1220 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1221 struct vm_area_struct *vma = vmf->vma;
1222 struct folio *folio;
1223 pgtable_t pgtable;
1224 vm_fault_t ret = 0;
1225
1226 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1227 if (unlikely(!folio))
1228 return VM_FAULT_FALLBACK;
1229
1230 pgtable = pte_alloc_one(vma->vm_mm);
1231 if (unlikely(!pgtable)) {
1232 ret = VM_FAULT_OOM;
1233 goto release;
1234 }
71e3aac0 1235
82b0f8c3
JK
1236 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1237 if (unlikely(!pmd_none(*vmf->pmd))) {
6b31d595 1238 goto unlock_release;
71e3aac0 1239 } else {
6b31d595
MH
1240 ret = check_stable_address_space(vma->vm_mm);
1241 if (ret)
1242 goto unlock_release;
1243
6b251fc9
AA
1244 /* Deliver the page fault to userland */
1245 if (userfaultfd_missing(vma)) {
82b0f8c3 1246 spin_unlock(vmf->ptl);
cfe3236d 1247 folio_put(folio);
bae473a4 1248 pte_free(vma->vm_mm, pgtable);
8fd5eda4
ML
1249 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1250 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1251 return ret;
6b251fc9 1252 }
82b0f8c3 1253 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
ebcfc63d 1254 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
c4812909 1255 mm_inc_nr_ptes(vma->vm_mm);
dafff3f4 1256 deferred_split_folio(folio, false);
82b0f8c3 1257 spin_unlock(vmf->ptl);
71e3aac0
AA
1258 }
1259
aa2e878e 1260 return 0;
6b31d595
MH
1261unlock_release:
1262 spin_unlock(vmf->ptl);
1263release:
1264 if (pgtable)
1265 pte_free(vma->vm_mm, pgtable);
cfe3236d 1266 folio_put(folio);
6b31d595
MH
1267 return ret;
1268
71e3aac0
AA
1269}
1270
444eb2a4 1271/*
21440d7e
DR
1272 * always: directly stall for all thp allocations
1273 * defer: wake kswapd and fail if not immediately available
1274 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1275 * fail if not immediately available
1276 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1277 * available
1278 * never: never stall for any thp allocation
444eb2a4 1279 */
164cc4fe 1280gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
444eb2a4 1281{
164cc4fe 1282 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
2f0799a0 1283
ac79f78d 1284 /* Always do synchronous compaction */
a8282608
AA
1285 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1286 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
ac79f78d
DR
1287
1288 /* Kick kcompactd and fail quickly */
21440d7e 1289 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
19deb769 1290 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
ac79f78d
DR
1291
1292 /* Synchronous compaction if madvised, otherwise kick kcompactd */
21440d7e 1293 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
19deb769
DR
1294 return GFP_TRANSHUGE_LIGHT |
1295 (vma_madvised ? __GFP_DIRECT_RECLAIM :
1296 __GFP_KSWAPD_RECLAIM);
ac79f78d
DR
1297
1298 /* Only do synchronous compaction if madvised */
21440d7e 1299 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
19deb769
DR
1300 return GFP_TRANSHUGE_LIGHT |
1301 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
ac79f78d 1302
19deb769 1303 return GFP_TRANSHUGE_LIGHT;
444eb2a4
MG
1304}
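/*
 * Editor's note, in GFP terms: GFP_TRANSHUGE includes __GFP_DIRECT_RECLAIM
 * (the faulting task may stall for compaction) while GFP_TRANSHUGE_LIGHT
 * does not, so the policy table above boils down to deciding whether
 * __GFP_DIRECT_RECLAIM and/or __GFP_KSWAPD_RECLAIM are added for this
 * particular allocation.
 */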
1305
c4088ebd 1306/* Caller must hold page table lock. */
e28833bc 1307static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
97ae1749 1308 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
e28833bc 1309 struct folio *zero_folio)
fc9fe822
KS
1310{
1311 pmd_t entry;
e3981db4 1312 entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
c8bb4163 1313 pgtable_trans_huge_deposit(mm, pmd, pgtable);
fc9fe822 1314 set_pmd_at(mm, haddr, pmd, entry);
c4812909 1315 mm_inc_nr_ptes(mm);
fc9fe822
KS
1316}
1317
2b740303 1318vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
71e3aac0 1319{
82b0f8c3 1320 struct vm_area_struct *vma = vmf->vma;
82b0f8c3 1321 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
a373baed 1322 vm_fault_t ret;
71e3aac0 1323
3485b883 1324 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
c0292554 1325 return VM_FAULT_FALLBACK;
a373baed
MWO
1326 ret = vmf_anon_prepare(vmf);
1327 if (ret)
1328 return ret;
4fa6893f 1329 khugepaged_enter_vma(vma, vma->vm_flags);
d2081b2b 1330
82b0f8c3 1331 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
bae473a4 1332 !mm_forbids_zeropage(vma->vm_mm) &&
128ec037
KS
1333 transparent_hugepage_use_zero_page()) {
1334 pgtable_t pgtable;
e28833bc 1335 struct folio *zero_folio;
2b740303 1336 vm_fault_t ret;
e28833bc 1337
4cf58924 1338 pgtable = pte_alloc_one(vma->vm_mm);
128ec037 1339 if (unlikely(!pgtable))
ba76149f 1340 return VM_FAULT_OOM;
e28833bc
MWO
1341 zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1342 if (unlikely(!zero_folio)) {
bae473a4 1343 pte_free(vma->vm_mm, pgtable);
81ab4201 1344 count_vm_event(THP_FAULT_FALLBACK);
c0292554 1345 return VM_FAULT_FALLBACK;
b9bbfbe3 1346 }
82b0f8c3 1347 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
6b251fc9 1348 ret = 0;
82b0f8c3 1349 if (pmd_none(*vmf->pmd)) {
6b31d595
MH
1350 ret = check_stable_address_space(vma->vm_mm);
1351 if (ret) {
1352 spin_unlock(vmf->ptl);
bfe8cc1d 1353 pte_free(vma->vm_mm, pgtable);
6b31d595 1354 } else if (userfaultfd_missing(vma)) {
82b0f8c3 1355 spin_unlock(vmf->ptl);
bfe8cc1d 1356 pte_free(vma->vm_mm, pgtable);
82b0f8c3 1357 ret = handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9
AA
1358 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1359 } else {
e28833bc
MWO
1360 set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1361 haddr, vmf->pmd, zero_folio);
fca40573 1362 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
82b0f8c3 1363 spin_unlock(vmf->ptl);
6b251fc9 1364 }
bfe8cc1d 1365 } else {
82b0f8c3 1366 spin_unlock(vmf->ptl);
bae473a4 1367 pte_free(vma->vm_mm, pgtable);
bfe8cc1d 1368 }
6b251fc9 1369 return ret;
71e3aac0 1370 }
ebcfc63d
DJ
1371
1372 return __do_huge_pmd_anonymous_page(vmf);
71e3aac0
AA
1373}
1374
6c88f726 1375static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3b6521f5
OH
1376 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1377 pgtable_t pgtable)
5cad465d
MW
1378{
1379 struct mm_struct *mm = vma->vm_mm;
1380 pmd_t entry;
5cad465d 1381
6c88f726
AP
1382 lockdep_assert_held(pmd_lockptr(mm, pmd));
1383
c6f3c5ee
AK
1384 if (!pmd_none(*pmd)) {
1385 if (write) {
1386 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1387 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
6c88f726 1388 return -EEXIST;
c6f3c5ee
AK
1389 }
1390 entry = pmd_mkyoung(*pmd);
1391 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1392 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1393 update_mmu_cache_pmd(vma, addr, pmd);
1394 }
1395
6c88f726 1396 return -EEXIST;
c6f3c5ee
AK
1397 }
1398
f25748e3
DW
1399 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1400 if (pfn_t_devmap(pfn))
1401 entry = pmd_mkdevmap(entry);
3c8e44c9
PX
1402 else
1403 entry = pmd_mkspecial(entry);
01871e59 1404 if (write) {
f55e1014
LT
1405 entry = pmd_mkyoung(pmd_mkdirty(entry));
1406 entry = maybe_pmd_mkwrite(entry, vma);
5cad465d 1407 }
3b6521f5
OH
1408
1409 if (pgtable) {
1410 pgtable_trans_huge_deposit(mm, pmd, pgtable);
c4812909 1411 mm_inc_nr_ptes(mm);
3b6521f5
OH
1412 }
1413
01871e59
RZ
1414 set_pmd_at(mm, addr, pmd, entry);
1415 update_mmu_cache_pmd(vma, addr, pmd);
6c88f726 1416 return 0;
5cad465d
MW
1417}
1418
9a9731b1 1419/**
7b806d22 1420 * vmf_insert_pfn_pmd - insert a pmd size pfn
9a9731b1
THV
1421 * @vmf: Structure describing the fault
1422 * @pfn: pfn to insert
9a9731b1
THV
1423 * @write: whether it's a write fault
1424 *
7b806d22 1425 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
9a9731b1
THV
1426 *
1427 * Return: vm_fault_t value.
1428 */
7b806d22 1429vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
5cad465d 1430{
fce86ff5
DW
1431 unsigned long addr = vmf->address & PMD_MASK;
1432 struct vm_area_struct *vma = vmf->vma;
7b806d22 1433 pgprot_t pgprot = vma->vm_page_prot;
3b6521f5 1434 pgtable_t pgtable = NULL;
6c88f726
AP
1435 spinlock_t *ptl;
1436 int error;
fce86ff5 1437
5cad465d
MW
1438 /*
1439 * If we had pmd_special, we could avoid all these restrictions,
1440 * but we need to be consistent with PTEs and architectures that
1441 * can't support a 'special' bit.
1442 */
e1fb4a08
DJ
1443 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1444 !pfn_t_devmap(pfn));
5cad465d
MW
1445 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1446 (VM_PFNMAP|VM_MIXEDMAP));
1447 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
5cad465d
MW
1448
1449 if (addr < vma->vm_start || addr >= vma->vm_end)
1450 return VM_FAULT_SIGBUS;
308a047c 1451
3b6521f5 1452 if (arch_needs_pgtable_deposit()) {
4cf58924 1453 pgtable = pte_alloc_one(vma->vm_mm);
3b6521f5
OH
1454 if (!pgtable)
1455 return VM_FAULT_OOM;
1456 }
1457
e1e1a3ae
DH
1458 pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
1459
6c88f726
AP
1460 ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1461 error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
1462 pgtable);
1463 spin_unlock(ptl);
1464 if (error && pgtable)
1465 pte_free(vma->vm_mm, pgtable);
308a047c 1466
ae18d6dc 1467 return VM_FAULT_NOPAGE;
5cad465d 1468}
7b806d22 1469EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
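/*
 * Editor's note: a typical caller is a filesystem DAX ->huge_fault() handler
 * that has resolved the faulting offset to a PMD-sized, PMD-aligned chunk of
 * device memory and wants one leaf entry instead of (on x86-64) 512 PTEs.
 */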
5cad465d 1470
6c88f726
AP
1471vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
1472 bool write)
1473{
1474 struct vm_area_struct *vma = vmf->vma;
1475 unsigned long addr = vmf->address & PMD_MASK;
1476 struct mm_struct *mm = vma->vm_mm;
1477 spinlock_t *ptl;
1478 pgtable_t pgtable = NULL;
1479 int error;
1480
1481 if (addr < vma->vm_start || addr >= vma->vm_end)
1482 return VM_FAULT_SIGBUS;
1483
1484 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
1485 return VM_FAULT_SIGBUS;
1486
1487 if (arch_needs_pgtable_deposit()) {
1488 pgtable = pte_alloc_one(vma->vm_mm);
1489 if (!pgtable)
1490 return VM_FAULT_OOM;
1491 }
1492
1493 ptl = pmd_lock(mm, vmf->pmd);
1494 if (pmd_none(*vmf->pmd)) {
1495 folio_get(folio);
1496 folio_add_file_rmap_pmd(folio, &folio->page, vma);
1497 add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
1498 }
1499 error = insert_pfn_pmd(vma, addr, vmf->pmd,
1500 pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
1501 write, pgtable);
1502 spin_unlock(ptl);
1503 if (error && pgtable)
1504 pte_free(mm, pgtable);
1505
1506 return VM_FAULT_NOPAGE;
1507}
1508EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
1509
a00cc7d9 1510#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
f55e1014 1511static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
a00cc7d9 1512{
f55e1014 1513 if (likely(vma->vm_flags & VM_WRITE))
a00cc7d9
MW
1514 pud = pud_mkwrite(pud);
1515 return pud;
1516}
1517
1518static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
7b806d22 1519 pud_t *pud, pfn_t pfn, bool write)
a00cc7d9
MW
1520{
1521 struct mm_struct *mm = vma->vm_mm;
7b806d22 1522 pgprot_t prot = vma->vm_page_prot;
a00cc7d9 1523 pud_t entry;
a00cc7d9 1524
c6f3c5ee
AK
1525 if (!pud_none(*pud)) {
1526 if (write) {
ef713ec3 1527 if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
dbe54153 1528 return;
c6f3c5ee
AK
1529 entry = pud_mkyoung(*pud);
1530 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1531 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1532 update_mmu_cache_pud(vma, addr, pud);
1533 }
dbe54153 1534 return;
c6f3c5ee
AK
1535 }
1536
a00cc7d9
MW
1537 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1538 if (pfn_t_devmap(pfn))
1539 entry = pud_mkdevmap(entry);
3c8e44c9
PX
1540 else
1541 entry = pud_mkspecial(entry);
a00cc7d9 1542 if (write) {
f55e1014
LT
1543 entry = pud_mkyoung(pud_mkdirty(entry));
1544 entry = maybe_pud_mkwrite(entry, vma);
a00cc7d9
MW
1545 }
1546 set_pud_at(mm, addr, pud, entry);
1547 update_mmu_cache_pud(vma, addr, pud);
a00cc7d9
MW
1548}
1549
9a9731b1 1550/**
7b806d22 1551 * vmf_insert_pfn_pud - insert a pud size pfn
9a9731b1
THV
1552 * @vmf: Structure describing the fault
1553 * @pfn: pfn to insert
9a9731b1
THV
1554 * @write: whether it's a write fault
1555 *
7b806d22 1556 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
9a9731b1
THV
1557 *
1558 * Return: vm_fault_t value.
1559 */
7b806d22 1560vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
a00cc7d9 1561{
fce86ff5
DW
1562 unsigned long addr = vmf->address & PUD_MASK;
1563 struct vm_area_struct *vma = vmf->vma;
7b806d22 1564 pgprot_t pgprot = vma->vm_page_prot;
dbe54153 1565 spinlock_t *ptl;
fce86ff5 1566
a00cc7d9
MW
1567 /*
1568 * If we had pud_special, we could avoid all these restrictions,
1569 * but we need to be consistent with PTEs and architectures that
1570 * can't support a 'special' bit.
1571 */
62ec0d8c
DJ
1572 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1573 !pfn_t_devmap(pfn));
a00cc7d9
MW
1574 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1575 (VM_PFNMAP|VM_MIXEDMAP));
1576 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
a00cc7d9
MW
1577
1578 if (addr < vma->vm_start || addr >= vma->vm_end)
1579 return VM_FAULT_SIGBUS;
1580
e1e1a3ae 1581 pfnmap_setup_cachemode_pfn(pfn_t_to_pfn(pfn), &pgprot);
a00cc7d9 1582
dbe54153 1583 ptl = pud_lock(vma->vm_mm, vmf->pud);
7b806d22 1584 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
dbe54153
AP
1585 spin_unlock(ptl);
1586
a00cc7d9
MW
1587 return VM_FAULT_NOPAGE;
1588}
7b806d22 1589EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
dbe54153
AP
1590
1591/**
1592 * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
1593 * @vmf: Structure describing the fault
1594 * @folio: folio to insert
1595 * @write: whether it's a write fault
1596 *
1597 * Return: vm_fault_t value.
1598 */
1599vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
1600 bool write)
1601{
1602 struct vm_area_struct *vma = vmf->vma;
1603 unsigned long addr = vmf->address & PUD_MASK;
1604 pud_t *pud = vmf->pud;
1605 struct mm_struct *mm = vma->vm_mm;
1606 spinlock_t *ptl;
1607
1608 if (addr < vma->vm_start || addr >= vma->vm_end)
1609 return VM_FAULT_SIGBUS;
1610
1611 if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
1612 return VM_FAULT_SIGBUS;
1613
1614 ptl = pud_lock(mm, pud);
1615
1616 /*
1617 * If there is already an entry present we assume the folio is
1618 * already mapped, hence no need to take another reference. We
1619 * still call insert_pfn_pud() though in case the mapping needs
1620 * upgrading to writeable.
1621 */
1622 if (pud_none(*vmf->pud)) {
1623 folio_get(folio);
1624 folio_add_file_rmap_pud(folio, &folio->page, vma);
1625 add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR);
1626 }
1627 insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)),
1628 write);
1629 spin_unlock(ptl);
1630
1631 return VM_FAULT_NOPAGE;
1632}
1633EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
a00cc7d9
MW
1634#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1635
4418c522
PX
1636void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1637 pmd_t *pmd, bool write)
3565fce3
DW
1638{
1639 pmd_t _pmd;
1640
a8f97366 1641 _pmd = pmd_mkyoung(*pmd);
a69e4717 1642 if (write)
a8f97366 1643 _pmd = pmd_mkdirty(_pmd);
3565fce3 1644 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
a69e4717 1645 pmd, _pmd, write))
3565fce3
DW
1646 update_mmu_cache_pmd(vma, addr, pmd);
1647}
1648
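/*
 * GUP helper: look up the page backing a device-mapped (devmap) huge pmd.
 * The pmd lock must be held, and the caller must request FOLL_GET or
 * FOLL_PIN since device pages are only returned when the caller manages
 * the reference count.  Returns the page, an ERR_PTR() or NULL.
 */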
1649struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
df06b37f 1650 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
3565fce3
DW
1651{
1652 unsigned long pfn = pmd_pfn(*pmd);
1653 struct mm_struct *mm = vma->vm_mm;
3565fce3 1654 struct page *page;
0f089235 1655 int ret;
3565fce3
DW
1656
1657 assert_spin_locked(pmd_lockptr(mm, pmd));
1658
f6f37321 1659 if (flags & FOLL_WRITE && !pmd_write(*pmd))
3565fce3
DW
1660 return NULL;
1661
1662 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1663 /* pass */;
1664 else
1665 return NULL;
1666
1667 if (flags & FOLL_TOUCH)
a69e4717 1668 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
3565fce3
DW
1669
1670 /*
1671 * device mapped pages can only be returned if the
1672 * caller will manage the page reference count.
1673 */
3faa52c0 1674 if (!(flags & (FOLL_GET | FOLL_PIN)))
3565fce3
DW
1675 return ERR_PTR(-EEXIST);
1676
1677 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
df06b37f
KB
1678 *pgmap = get_dev_pagemap(pfn, *pgmap);
1679 if (!*pgmap)
3565fce3
DW
1680 return ERR_PTR(-EFAULT);
1681 page = pfn_to_page(pfn);
f442fa61 1682 ret = try_grab_folio(page_folio(page), 1, flags);
0f089235
LG
1683 if (ret)
1684 page = ERR_PTR(ret);
3565fce3
DW
1685
1686 return page;
1687}
1688
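/*
 * Copy a huge pmd from the parent to the child mm at fork time.  Handles
 * special (PFN-mapped) pmds, pmd migration entries, the huge zero page and
 * anonymous THPs.  If the source folio may be DMA-pinned, the pmd is split
 * and -EAGAIN is returned so the range is copied again at PTE level.
 */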
71e3aac0
AA
1689int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1690 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
8f34f1ea 1691 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
71e3aac0 1692{
c4088ebd 1693 spinlock_t *dst_ptl, *src_ptl;
71e3aac0 1694 struct page *src_page;
96c772c2 1695 struct folio *src_folio;
71e3aac0 1696 pmd_t pmd;
12c9d70b 1697 pgtable_t pgtable = NULL;
628d47ce 1698 int ret = -ENOMEM;
71e3aac0 1699
bc02afbd 1700 pmd = pmdp_get_lockless(src_pmd);
47fa3011 1701 if (unlikely(pmd_present(pmd) && pmd_special(pmd))) {
bc02afbd
PX
1702 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1703 src_ptl = pmd_lockptr(src_mm, src_pmd);
1704 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1705 /*
1706 * No need to recheck the pmd, it can't change with write
1707 * mmap lock held here.
1708 *
1710 * Meanwhile, make sure it's not a CoW VMA with a writable
1711 * mapping; otherwise either the anon page wrongly had the
1712 * special bit applied, or we wrongly made the PRIVATE mapping
1713 * able to write to the backing MMIO.
1713 */
1714 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1715 goto set_pmd;
1716 }
1717
628d47ce 1718 /* Skip if it can be refilled on fault */
8f34f1ea 1719 if (!vma_is_anonymous(dst_vma))
628d47ce
KS
1720 return 0;
1721
4cf58924 1722 pgtable = pte_alloc_one(dst_mm);
628d47ce
KS
1723 if (unlikely(!pgtable))
1724 goto out;
71e3aac0 1725
c4088ebd
KS
1726 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1727 src_ptl = pmd_lockptr(src_mm, src_pmd);
1728 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
71e3aac0
AA
1729
1730 ret = -EAGAIN;
1731 pmd = *src_pmd;
84c3fc4e
ZY
1732
1733#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1734 if (unlikely(is_swap_pmd(pmd))) {
1735 swp_entry_t entry = pmd_to_swp_entry(pmd);
1736
1737 VM_BUG_ON(!is_pmd_migration_entry(pmd));
6c287605 1738 if (!is_readable_migration_entry(entry)) {
4dd845b5
AP
1739 entry = make_readable_migration_entry(
1740 swp_offset(entry));
84c3fc4e 1741 pmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
1742 if (pmd_swp_soft_dirty(*src_pmd))
1743 pmd = pmd_swp_mksoft_dirty(pmd);
8f34f1ea
PX
1744 if (pmd_swp_uffd_wp(*src_pmd))
1745 pmd = pmd_swp_mkuffd_wp(pmd);
84c3fc4e
ZY
1746 set_pmd_at(src_mm, addr, src_pmd, pmd);
1747 }
dd8a67f9 1748 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
af5b0f6a 1749 mm_inc_nr_ptes(dst_mm);
dd8a67f9 1750 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
8f34f1ea
PX
1751 if (!userfaultfd_wp(dst_vma))
1752 pmd = pmd_swp_clear_uffd_wp(pmd);
84c3fc4e
ZY
1753 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1754 ret = 0;
1755 goto out_unlock;
1756 }
1757#endif
1758
628d47ce 1759 if (unlikely(!pmd_trans_huge(pmd))) {
71e3aac0
AA
1760 pte_free(dst_mm, pgtable);
1761 goto out_unlock;
1762 }
fc9fe822 1763 /*
c4088ebd 1764 * When the page table lock is held, the huge zero pmd should not be
fc9fe822
KS
1765 * under splitting, since we don't split the page itself, only the pmd
1766 * into a page table.
1767 */
1768 if (is_huge_zero_pmd(pmd)) {
97ae1749 1769 /*
e28833bc
MWO
1770 * mm_get_huge_zero_folio() will never allocate a new
1771 * folio here, since we already have a zero page to
1772 * copy. It just takes a reference.
97ae1749 1773 */
5691753d 1774 mm_get_huge_zero_folio(dst_mm);
5fc7a5f6 1775 goto out_zero_page;
fc9fe822 1776 }
de466bd6 1777
628d47ce
KS
1778 src_page = pmd_page(pmd);
1779 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
96c772c2 1780 src_folio = page_folio(src_page);
d042035e 1781
96c772c2 1782 folio_get(src_folio);
405c4ef7 1783 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
fb3d824d 1784 /* Page maybe pinned: split and retry the fault on PTEs. */
96c772c2 1785 folio_put(src_folio);
d042035e
PX
1786 pte_free(dst_mm, pgtable);
1787 spin_unlock(src_ptl);
1788 spin_unlock(dst_ptl);
b960818d 1789 __split_huge_pmd(src_vma, src_pmd, addr, false);
d042035e
PX
1790 return -EAGAIN;
1791 }
628d47ce 1792 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
5fc7a5f6 1793out_zero_page:
c4812909 1794 mm_inc_nr_ptes(dst_mm);
628d47ce 1795 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
71e3aac0 1796 pmdp_set_wrprotect(src_mm, addr, src_pmd);
8f34f1ea
PX
1797 if (!userfaultfd_wp(dst_vma))
1798 pmd = pmd_clear_uffd_wp(pmd);
bc02afbd
PX
1799 pmd = pmd_wrprotect(pmd);
1800set_pmd:
1801 pmd = pmd_mkold(pmd);
71e3aac0 1802 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
71e3aac0
AA
1803
1804 ret = 0;
1805out_unlock:
c4088ebd
KS
1806 spin_unlock(src_ptl);
1807 spin_unlock(dst_ptl);
71e3aac0
AA
1808out:
1809 return ret;
1810}
1811
a00cc7d9 1812#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
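/*
 * PUD counterpart of touch_pmd(): mark the huge pud accessed (and dirty
 * for write access) and update the MMU cache if the flags were accepted.
 */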
1b167618
PX
1813void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1814 pud_t *pud, bool write)
a00cc7d9
MW
1815{
1816 pud_t _pud;
1817
a8f97366 1818 _pud = pud_mkyoung(*pud);
5fe653e9 1819 if (write)
a8f97366 1820 _pud = pud_mkdirty(_pud);
a00cc7d9 1821 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
5fe653e9 1822 pud, _pud, write))
a00cc7d9
MW
1823 update_mmu_cache_pud(vma, addr, pud);
1824}
1825
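/*
 * Copy a huge pud at fork time.  Anonymous PUDs are not supported yet, so
 * this only deals with file/DAX mappings; for CoW mappings the source is
 * write-protected so that later writes fault.  Returns -EAGAIN if the
 * source entry is no longer a huge pud.
 */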
a00cc7d9
MW
1826int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1827 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1828 struct vm_area_struct *vma)
1829{
1830 spinlock_t *dst_ptl, *src_ptl;
1831 pud_t pud;
1832 int ret;
1833
1834 dst_ptl = pud_lock(dst_mm, dst_pud);
1835 src_ptl = pud_lockptr(src_mm, src_pud);
1836 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1837
1838 ret = -EAGAIN;
1839 pud = *src_pud;
1840 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1841 goto out_unlock;
1842
fb3d824d 1843 /*
96c772c2
DH
1844 * TODO: once we support anonymous pages, use
1845 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
fb3d824d 1846 */
bc02afbd
PX
1847 if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
1848 pudp_set_wrprotect(src_mm, addr, src_pud);
1849 pud = pud_wrprotect(pud);
1850 }
1851 pud = pud_mkold(pud);
a00cc7d9
MW
1852 set_pud_at(dst_mm, addr, dst_pud, pud);
1853
1854 ret = 0;
1855out_unlock:
1856 spin_unlock(src_ptl);
1857 spin_unlock(dst_ptl);
1858 return ret;
1859}
1860
1861void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1862{
a00cc7d9
MW
1863 bool write = vmf->flags & FAULT_FLAG_WRITE;
1864
1865 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1866 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1867 goto unlock;
1868
5fe653e9 1869 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
a00cc7d9
MW
1870unlock:
1871 spin_unlock(vmf->ptl);
1872}
1873#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1874
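/*
 * Fault handler for a present huge pmd that only needs its accessed (and
 * possibly dirty) bits refreshed: revalidate the pmd under its lock, then
 * let touch_pmd() do the update.
 */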
5db4f15c 1875void huge_pmd_set_accessed(struct vm_fault *vmf)
a1dd450b 1876{
20f664aa 1877 bool write = vmf->flags & FAULT_FLAG_WRITE;
a1dd450b 1878
82b0f8c3 1879 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
a69e4717 1880 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
a1dd450b
WD
1881 goto unlock;
1882
a69e4717 1883 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
a1dd450b
WD
1884
1885unlock:
82b0f8c3 1886 spin_unlock(vmf->ptl);
a1dd450b
WD
1887}
1888
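/*
 * Write fault on the huge zero page: allocate a fresh anonymous PMD-sized
 * folio and replace the zero-page mapping with it.  Returns
 * VM_FAULT_FALLBACK if no folio can be allocated, so the caller can split
 * the pmd and retry at PTE granularity.
 */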
1ced09e0
DJ
1889static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
1890{
1891 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1892 struct vm_area_struct *vma = vmf->vma;
1893 struct mmu_notifier_range range;
1894 struct folio *folio;
1895 vm_fault_t ret = 0;
1896
1897 folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
1898 if (unlikely(!folio))
1899 return VM_FAULT_FALLBACK;
1900
1901 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, haddr,
1902 haddr + HPAGE_PMD_SIZE);
1903 mmu_notifier_invalidate_range_start(&range);
1904 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1905 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd)))
1906 goto release;
1907 ret = check_stable_address_space(vma->vm_mm);
1908 if (ret)
1909 goto release;
1910 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
1911 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
1912 goto unlock;
1913release:
1914 folio_put(folio);
1915unlock:
1916 spin_unlock(vmf->ptl);
1917 mmu_notifier_invalidate_range_end(&range);
1918 return ret;
1919}
1920
5db4f15c 1921vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
71e3aac0 1922{
c89357e2 1923 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 1924 struct vm_area_struct *vma = vmf->vma;
2fad3d14 1925 struct folio *folio;
3917c802 1926 struct page *page;
82b0f8c3 1927 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5db4f15c 1928 pmd_t orig_pmd = vmf->orig_pmd;
71e3aac0 1929
82b0f8c3 1930 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
81d1b09c 1931 VM_BUG_ON_VMA(!vma->anon_vma, vma);
3917c802 1932
1ced09e0
DJ
1933 if (is_huge_zero_pmd(orig_pmd)) {
1934 vm_fault_t ret = do_huge_zero_wp_pmd(vmf);
1935
1936 if (!(ret & VM_FAULT_FALLBACK))
1937 return ret;
1938
1939 /* Fallback to splitting PMD if THP cannot be allocated */
3917c802 1940 goto fallback;
1ced09e0 1941 }
3917c802 1942
82b0f8c3 1943 spin_lock(vmf->ptl);
3917c802
KS
1944
1945 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1946 spin_unlock(vmf->ptl);
1947 return 0;
1948 }
71e3aac0
AA
1949
1950 page = pmd_page(orig_pmd);
2fad3d14 1951 folio = page_folio(page);
f6004e73 1952 VM_BUG_ON_PAGE(!PageHead(page), page);
3917c802 1953
6c287605
DH
1954 /* Early check when only holding the PT lock. */
1955 if (PageAnonExclusive(page))
1956 goto reuse;
1957
2fad3d14
MWO
1958 if (!folio_trylock(folio)) {
1959 folio_get(folio);
ba3c4ce6 1960 spin_unlock(vmf->ptl);
2fad3d14 1961 folio_lock(folio);
ba3c4ce6
YH
1962 spin_lock(vmf->ptl);
1963 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
3917c802 1964 spin_unlock(vmf->ptl);
2fad3d14
MWO
1965 folio_unlock(folio);
1966 folio_put(folio);
3917c802 1967 return 0;
ba3c4ce6 1968 }
2fad3d14 1969 folio_put(folio);
ba3c4ce6 1970 }
3917c802 1971
6c287605
DH
1972 /* Recheck after temporarily dropping the PT lock. */
1973 if (PageAnonExclusive(page)) {
2fad3d14 1974 folio_unlock(folio);
6c287605
DH
1975 goto reuse;
1976 }
1977
3917c802 1978 /*
2fad3d14
MWO
1979 * See do_wp_page(): we can only reuse the folio exclusively if
1980 * there are no additional references. Note that we always drain
1fec6890 1981 * the LRU cache immediately after adding a THP.
3917c802 1982 */
2fad3d14
MWO
1983 if (folio_ref_count(folio) >
1984 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
3bff7e3f 1985 goto unlock_fallback;
2fad3d14
MWO
1986 if (folio_test_swapcache(folio))
1987 folio_free_swap(folio);
1988 if (folio_ref_count(folio) == 1) {
71e3aac0 1989 pmd_t entry;
6c54dc6c 1990
06968625 1991 folio_move_anon_rmap(folio, vma);
5ca43289 1992 SetPageAnonExclusive(page);
2fad3d14 1993 folio_unlock(folio);
6c287605 1994reuse:
c89357e2
DH
1995 if (unlikely(unshare)) {
1996 spin_unlock(vmf->ptl);
1997 return 0;
1998 }
71e3aac0 1999 entry = pmd_mkyoung(orig_pmd);
f55e1014 2000 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3917c802 2001 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
82b0f8c3 2002 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
82b0f8c3 2003 spin_unlock(vmf->ptl);
cb8d8633 2004 return 0;
71e3aac0 2005 }
3917c802 2006
3bff7e3f 2007unlock_fallback:
2fad3d14 2008 folio_unlock(folio);
82b0f8c3 2009 spin_unlock(vmf->ptl);
3917c802 2010fallback:
b960818d 2011 __split_huge_pmd(vma, vmf->pmd, vmf->address, false);
3917c802 2012 return VM_FAULT_FALLBACK;
71e3aac0
AA
2013}
2014
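/*
 * Decide whether a protection change may map this pmd writable right away,
 * mirroring can_change_pte_writable(): the VMA must allow writes, the entry
 * must not be NUMA-protnone, softdirty/uffd-wp must not need write faults,
 * and private mappings additionally need an exclusively owned anon page.
 */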
c27f479e
DH
2015static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
2016 unsigned long addr, pmd_t pmd)
2017{
2018 struct page *page;
2019
2020 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
2021 return false;
2022
2023 /* Don't touch entries that are not even readable (NUMA hinting). */
2024 if (pmd_protnone(pmd))
2025 return false;
2026
2027 /* Do we need write faults for softdirty tracking? */
f38ee285 2028 if (pmd_needs_soft_dirty_wp(vma, pmd))
c27f479e
DH
2029 return false;
2030
2031 /* Do we need write faults for uffd-wp tracking? */
2032 if (userfaultfd_huge_pmd_wp(vma, pmd))
2033 return false;
2034
2035 if (!(vma->vm_flags & VM_SHARED)) {
2036 /* See can_change_pte_writable(). */
2037 page = vm_normal_page_pmd(vma, addr, pmd);
2038 return page && PageAnon(page) && PageAnonExclusive(page);
2039 }
2040
2041 /* See can_change_pte_writable(). */
2042 return pmd_dirty(pmd);
2043}
2044
d10e63f2 2045/* NUMA hinting page fault entry point for trans huge pmds */
5db4f15c 2046vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
d10e63f2 2047{
82b0f8c3 2048 struct vm_area_struct *vma = vmf->vma;
667ffc31 2049 struct folio *folio;
82b0f8c3 2050 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
667ffc31 2051 int nid = NUMA_NO_NODE;
727d50a7
ZY
2052 int target_nid, last_cpupid;
2053 pmd_t pmd, old_pmd;
4b88c23a 2054 bool writable = false;
6688cc05 2055 int flags = 0;
d10e63f2 2056
82b0f8c3 2057 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
727d50a7
ZY
2058 old_pmd = pmdp_get(vmf->pmd);
2059
2060 if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
82b0f8c3 2061 spin_unlock(vmf->ptl);
fd8c35a9 2062 return 0;
de466bd6
MG
2063 }
2064
727d50a7 2065 pmd = pmd_modify(old_pmd, vma->vm_page_prot);
6a56ccbc
DH
2066
2067 /*
2068 * Detect now whether the PMD could be writable; this information
2069 * is only valid while holding the PT lock.
2070 */
2071 writable = pmd_write(pmd);
2072 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
2073 can_change_pmd_writable(vma, vmf->address, pmd))
2074 writable = true;
2075
667ffc31
KW
2076 folio = vm_normal_folio_pmd(vma, haddr, pmd);
2077 if (!folio)
c5b5a3dd
YS
2078 goto out_map;
2079
667ffc31 2080 nid = folio_nid(folio);
727d50a7
ZY
2081
2082 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
2083 &last_cpupid);
ee86814b
DH
2084 if (target_nid == NUMA_NO_NODE)
2085 goto out_map;
2086 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
2087 flags |= TNF_MIGRATE_FAIL;
c5b5a3dd
YS
2088 goto out_map;
2089 }
ee86814b 2090 /* The folio is isolated and isolation code holds a folio reference. */
82b0f8c3 2091 spin_unlock(vmf->ptl);
6a56ccbc 2092 writable = false;
8b1b436d 2093
bfc1d178 2094 if (!migrate_misplaced_folio(folio, target_nid)) {
6688cc05 2095 flags |= TNF_MIGRATED;
667ffc31 2096 nid = target_nid;
667ffc31 2097 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
fd8c35a9
ZY
2098 return 0;
2099 }
8191acbd 2100
fd8c35a9
ZY
2101 flags |= TNF_MIGRATE_FAIL;
2102 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
727d50a7 2103 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
fd8c35a9
ZY
2104 spin_unlock(vmf->ptl);
2105 return 0;
2106 }
c5b5a3dd
YS
2107out_map:
2108 /* Restore the PMD */
727d50a7 2109 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
c5b5a3dd 2110 pmd = pmd_mkyoung(pmd);
6a56ccbc 2111 if (writable)
161e393c 2112 pmd = pmd_mkwrite(pmd, vma);
c5b5a3dd
YS
2113 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
2114 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
2115 spin_unlock(vmf->ptl);
fd8c35a9
ZY
2116
2117 if (nid != NUMA_NO_NODE)
2118 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
2119 return 0;
d10e63f2
MG
2120}
2121
319904ad
YH
2122/*
2123 * Return true if MADV_FREE was applied successfully to the entire pmd page.
2124 * Otherwise, return false.
2125 */
2126bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
b8d3c4c3 2127 pmd_t *pmd, unsigned long addr, unsigned long next)
b8d3c4c3
MK
2128{
2129 spinlock_t *ptl;
2130 pmd_t orig_pmd;
fc986a38 2131 struct folio *folio;
b8d3c4c3 2132 struct mm_struct *mm = tlb->mm;
319904ad 2133 bool ret = false;
b8d3c4c3 2134
ed6a7935 2135 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 2136
b6ec57f4
KS
2137 ptl = pmd_trans_huge_lock(pmd, vma);
2138 if (!ptl)
25eedabe 2139 goto out_unlocked;
b8d3c4c3
MK
2140
2141 orig_pmd = *pmd;
319904ad 2142 if (is_huge_zero_pmd(orig_pmd))
b8d3c4c3 2143 goto out;
b8d3c4c3 2144
84c3fc4e
ZY
2145 if (unlikely(!pmd_present(orig_pmd))) {
2146 VM_BUG_ON(thp_migration_supported() &&
2147 !is_pmd_migration_entry(orig_pmd));
2148 goto out;
2149 }
2150
e06d03d5 2151 folio = pmd_folio(orig_pmd);
b8d3c4c3 2152 /*
fc986a38
KW
2153 * If other processes are mapping this folio, we can't discard
2154 * the folio unless they all do MADV_FREE, so let's skip the folio.
b8d3c4c3 2155 */
003fde44 2156 if (folio_maybe_mapped_shared(folio))
b8d3c4c3
MK
2157 goto out;
2158
fc986a38 2159 if (!folio_trylock(folio))
b8d3c4c3
MK
2160 goto out;
2161
2162 /*
2163 * If the user wants to discard only part of the THP's pages, split it
2164 * so MADV_FREE will deactivate just those pages.
2165 */
2166 if (next - addr != HPAGE_PMD_SIZE) {
fc986a38 2167 folio_get(folio);
b8d3c4c3 2168 spin_unlock(ptl);
fc986a38
KW
2169 split_folio(folio);
2170 folio_unlock(folio);
2171 folio_put(folio);
b8d3c4c3
MK
2172 goto out_unlocked;
2173 }
2174
fc986a38
KW
2175 if (folio_test_dirty(folio))
2176 folio_clear_dirty(folio);
2177 folio_unlock(folio);
b8d3c4c3 2178
b8d3c4c3 2179 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
58ceeb6b 2180 pmdp_invalidate(vma, addr, pmd);
b8d3c4c3
MK
2181 orig_pmd = pmd_mkold(orig_pmd);
2182 orig_pmd = pmd_mkclean(orig_pmd);
2183
2184 set_pmd_at(mm, addr, pmd, orig_pmd);
2185 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2186 }
802a3a92 2187
6a6fe9eb 2188 folio_mark_lazyfree(folio);
319904ad 2189 ret = true;
b8d3c4c3
MK
2190out:
2191 spin_unlock(ptl);
2192out_unlocked:
2193 return ret;
2194}
2195
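/*
 * Free the page table page deposited for a huge pmd and drop the mm's
 * page-table accounting for it.
 */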
953c66c2
AK
2196static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2197{
2198 pgtable_t pgtable;
2199
2200 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2201 pte_free(mm, pgtable);
c4812909 2202 mm_dec_nr_ptes(mm);
953c66c2
AK
2203}
2204
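/*
 * Zap one huge pmd: clear the entry, free any deposited page table and fix
 * up the RSS counters.  For present entries the folio's rmap is removed and
 * the page is handed to the mmu_gather so it is freed only after the TLB
 * flush; migration entries need no flush.
 */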
71e3aac0 2205int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
f21760b1 2206 pmd_t *pmd, unsigned long addr)
71e3aac0 2207{
da146769 2208 pmd_t orig_pmd;
bf929152 2209 spinlock_t *ptl;
71e3aac0 2210
ed6a7935 2211 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 2212
b6ec57f4
KS
2213 ptl = __pmd_trans_huge_lock(pmd, vma);
2214 if (!ptl)
da146769
KS
2215 return 0;
2216 /*
2217 * For architectures like ppc64 we look at the deposited pgtable
2218 * when calling pmdp_huge_get_and_clear, so do the
2219 * pgtable_trans_huge_withdraw only after the pmdp-related
2220 * operations are finished.
2221 */
93a98695
AK
2222 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2223 tlb->fullmm);
e5136e87 2224 arch_check_zapped_pmd(vma, orig_pmd);
da146769 2225 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
38607c62 2226 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
3b6521f5
OH
2227 if (arch_needs_pgtable_deposit())
2228 zap_deposited_table(tlb->mm, pmd);
da146769 2229 spin_unlock(ptl);
da146769 2230 } else if (is_huge_zero_pmd(orig_pmd)) {
dbe54153
AP
2231 if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
2232 zap_deposited_table(tlb->mm, pmd);
da146769 2233 spin_unlock(ptl);
da146769 2234 } else {
0103b27a 2235 struct folio *folio = NULL;
616b8371
ZY
2236 int flush_needed = 1;
2237
2238 if (pmd_present(orig_pmd)) {
0103b27a
KW
2239 struct page *page = pmd_page(orig_pmd);
2240
2241 folio = page_folio(page);
2242 folio_remove_rmap_pmd(folio, page, vma);
0a7bda48 2243 WARN_ON_ONCE(folio_mapcount(folio) < 0);
616b8371
ZY
2244 VM_BUG_ON_PAGE(!PageHead(page), page);
2245 } else if (thp_migration_supported()) {
2246 swp_entry_t entry;
2247
2248 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
2249 entry = pmd_to_swp_entry(orig_pmd);
0103b27a 2250 folio = pfn_swap_entry_folio(entry);
616b8371
ZY
2251 flush_needed = 0;
2252 } else
2253 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2254
0103b27a 2255 if (folio_test_anon(folio)) {
c14a6eb4 2256 zap_deposited_table(tlb->mm, pmd);
b5072380
KS
2257 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2258 } else {
953c66c2
AK
2259 if (arch_needs_pgtable_deposit())
2260 zap_deposited_table(tlb->mm, pmd);
6b27cc6c 2261 add_mm_counter(tlb->mm, mm_counter_file(folio),
0103b27a 2262 -HPAGE_PMD_NR);
10d483f1
BW
2263
2264 /*
2265 * Use flush_needed to indicate whether the PMD entry
2266 * is present, instead of checking pmd_present() again.
2267 */
2268 if (flush_needed && pmd_young(orig_pmd) &&
2269 likely(vma_has_recency(vma)))
2270 folio_mark_accessed(folio);
b5072380 2271 }
616b8371 2272
da146769 2273 spin_unlock(ptl);
616b8371 2274 if (flush_needed)
0103b27a 2275 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
025c5b24 2276 }
da146769 2277 return 1;
71e3aac0
AA
2278}
2279
1dd38b6c
AK
2280#ifndef pmd_move_must_withdraw
2281static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2282 spinlock_t *old_pmd_ptl,
2283 struct vm_area_struct *vma)
2284{
2285 /*
2286 * With split pmd lock we also need to move preallocated
2287 * PTE page table if new_pmd is on different PMD page table.
2288 *
2289 * We also don't deposit and withdraw tables for file pages.
2290 */
2291 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2292}
2293#endif
2294
ab6e3d09
NH
2295static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2296{
2297#ifdef CONFIG_MEM_SOFT_DIRTY
2298 if (unlikely(is_pmd_migration_entry(pmd)))
2299 pmd = pmd_swp_mksoft_dirty(pmd);
2300 else if (pmd_present(pmd))
2301 pmd = pmd_mksoft_dirty(pmd);
2302#endif
2303 return pmd;
2304}
2305
0cef0bb8
RR
2306static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
2307{
2308 if (pmd_present(pmd))
2309 pmd = pmd_clear_uffd_wp(pmd);
2310 else if (is_swap_pmd(pmd))
2311 pmd = pmd_swp_clear_uffd_wp(pmd);
2312
2313 return pmd;
2314}
2315
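/*
 * Move a huge pmd from old_addr to new_addr for mremap().  The deposited
 * page table follows the pmd when the architecture requires it, the entry
 * is marked soft-dirty where configured, and the uffd-wp bit is cleared
 * for VMAs with userfaultfd registered but no remap events.  Returns true
 * if the pmd was moved.
 */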
bf8616d5 2316bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
b8aa9d9d 2317 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
37a1c49a 2318{
bf929152 2319 spinlock_t *old_ptl, *new_ptl;
37a1c49a 2320 pmd_t pmd;
37a1c49a 2321 struct mm_struct *mm = vma->vm_mm;
5d190420 2322 bool force_flush = false;
37a1c49a 2323
37a1c49a
AA
2324 /*
2325 * The destination pmd shouldn't be established, free_pgtables()
a5be621e
HD
2326 * should have released it; but move_page_tables() might have already
2327 * inserted a page table, if racing against shmem/file collapse.
37a1c49a 2328 */
a5be621e 2329 if (!pmd_none(*new_pmd)) {
37a1c49a 2330 VM_BUG_ON(pmd_trans_huge(*new_pmd));
4b471e88 2331 return false;
37a1c49a
AA
2332 }
2333
bf929152
KS
2334 /*
2335 * We don't have to worry about the ordering of src and dst
c1e8d7c6 2336 * ptlocks because exclusive mmap_lock prevents deadlock.
bf929152 2337 */
b6ec57f4
KS
2338 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2339 if (old_ptl) {
bf929152
KS
2340 new_ptl = pmd_lockptr(mm, new_pmd);
2341 if (new_ptl != old_ptl)
2342 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
8809aa2d 2343 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
eb66ae03 2344 if (pmd_present(pmd))
a2ce2666 2345 force_flush = true;
025c5b24 2346 VM_BUG_ON(!pmd_none(*new_pmd));
3592806c 2347
1dd38b6c 2348 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
b3084f4d 2349 pgtable_t pgtable;
3592806c
KS
2350 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2351 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
3592806c 2352 }
ab6e3d09 2353 pmd = move_soft_dirty_pmd(pmd);
0cef0bb8
RR
2354 if (vma_has_uffd_without_event_remap(vma))
2355 pmd = clear_uffd_wp_pmd(pmd);
ab6e3d09 2356 set_pmd_at(mm, new_addr, new_pmd, pmd);
5d190420 2357 if (force_flush)
7c38f181 2358 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
eb66ae03
LT
2359 if (new_ptl != old_ptl)
2360 spin_unlock(new_ptl);
bf929152 2361 spin_unlock(old_ptl);
4b471e88 2362 return true;
37a1c49a 2363 }
4b471e88 2364 return false;
37a1c49a
AA
2365}
2366
f123d74a
MG
2367/*
2368 * Returns
2369 * - 0 if PMD could not be locked
f0953a1b 2370 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
e346e668 2371 * or if prot_numa but THP migration is not supported
f0953a1b 2372 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
f123d74a 2373 */
4a18419f
NA
2374int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2375 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2376 unsigned long cp_flags)
cd7548ab
JW
2377{
2378 struct mm_struct *mm = vma->vm_mm;
bf929152 2379 spinlock_t *ptl;
c9fe6656 2380 pmd_t oldpmd, entry;
58705444 2381 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
292924b2
PX
2382 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2383 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6a56ccbc 2384 int ret = 1;
cd7548ab 2385
4a18419f
NA
2386 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2387
e346e668
YS
2388 if (prot_numa && !thp_migration_supported())
2389 return 1;
2390
b6ec57f4 2391 ptl = __pmd_trans_huge_lock(pmd, vma);
0a85e51d
KS
2392 if (!ptl)
2393 return 0;
e944fd67 2394
84c3fc4e
ZY
2395#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2396 if (is_swap_pmd(*pmd)) {
2397 swp_entry_t entry = pmd_to_swp_entry(*pmd);
5662400a 2398 struct folio *folio = pfn_swap_entry_folio(entry);
24bf08c4 2399 pmd_t newpmd;
84c3fc4e
ZY
2400
2401 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
4dd845b5 2402 if (is_writable_migration_entry(entry)) {
84c3fc4e
ZY
2403 /*
2404 * A protection check is difficult so
2405 * just be safe and disable write
2406 */
d986ba2b 2407 if (folio_test_anon(folio))
6c287605
DH
2408 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2409 else
2410 entry = make_readable_migration_entry(swp_offset(entry));
84c3fc4e 2411 newpmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
2412 if (pmd_swp_soft_dirty(*pmd))
2413 newpmd = pmd_swp_mksoft_dirty(newpmd);
24bf08c4
DH
2414 } else {
2415 newpmd = *pmd;
84c3fc4e 2416 }
24bf08c4
DH
2417
2418 if (uffd_wp)
2419 newpmd = pmd_swp_mkuffd_wp(newpmd);
2420 else if (uffd_wp_resolve)
2421 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2422 if (!pmd_same(*pmd, newpmd))
2423 set_pmd_at(mm, addr, pmd, newpmd);
84c3fc4e
ZY
2424 goto unlock;
2425 }
2426#endif
2427
a1a3a2fc 2428 if (prot_numa) {
d986ba2b 2429 struct folio *folio;
33024536 2430 bool toptier;
a1a3a2fc
YH
2431 /*
2432 * Avoid trapping faults against the zero page. The read-only
2433 * data is likely to be read-cached on the local CPU and
2434 * local/remote hits to the zero page are not interesting.
2435 */
2436 if (is_huge_zero_pmd(*pmd))
2437 goto unlock;
025c5b24 2438
a1a3a2fc
YH
2439 if (pmd_protnone(*pmd))
2440 goto unlock;
0a85e51d 2441
e06d03d5 2442 folio = pmd_folio(*pmd);
d986ba2b 2443 toptier = node_is_toptier(folio_nid(folio));
a1a3a2fc
YH
2444 /*
2445 * Skip scanning top tier node if normal numa
2446 * balancing is disabled
2447 */
2448 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
33024536 2449 toptier)
a1a3a2fc 2450 goto unlock;
33024536 2451
2a28713a 2452 if (folio_use_access_time(folio))
d986ba2b
KW
2453 folio_xchg_access_time(folio,
2454 jiffies_to_msecs(jiffies));
a1a3a2fc 2455 }
ced10803 2456 /*
3e4e28c5 2457 * In the prot_numa case we are under mmap_read_lock(mm). It's critical
ced10803 2458 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
3e4e28c5 2459 * which is also under mmap_read_lock(mm):
ced10803
KS
2460 *
2461 * CPU0: CPU1:
2462 * change_huge_pmd(prot_numa=1)
2463 * pmdp_huge_get_and_clear_notify()
2464 * madvise_dontneed()
2465 * zap_pmd_range()
2466 * pmd_trans_huge(*pmd) == 0 (without ptl)
2467 * // skip the pmd
2468 * set_pmd_at();
2469 * // pmd is re-established
2470 *
2471 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2472 * which may break userspace.
2473 *
4f831457 2474 * pmdp_invalidate_ad() is required to make sure we don't miss
ced10803
KS
2475 * dirty/young flags set by hardware.
2476 */
4f831457 2477 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
ced10803 2478
c9fe6656 2479 entry = pmd_modify(oldpmd, newprot);
f1eb1bac 2480 if (uffd_wp)
292924b2 2481 entry = pmd_mkuffd_wp(entry);
f1eb1bac 2482 else if (uffd_wp_resolve)
292924b2
PX
2483 /*
2484 * Leave the write bit to be handled by PF interrupt
2485 * handler, then things like COW could be properly
2486 * handled.
2487 */
2488 entry = pmd_clear_uffd_wp(entry);
c27f479e
DH
2489
2490 /* See change_pte_range(). */
2491 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2492 can_change_pmd_writable(vma, addr, entry))
161e393c 2493 entry = pmd_mkwrite(entry, vma);
c27f479e 2494
0a85e51d
KS
2495 ret = HPAGE_PMD_NR;
2496 set_pmd_at(mm, addr, pmd, entry);
4a18419f 2497
c9fe6656
NA
2498 if (huge_pmd_needs_flush(oldpmd, entry))
2499 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
0a85e51d
KS
2500unlock:
2501 spin_unlock(ptl);
025c5b24
NH
2502 return ret;
2503}
2504
cb0f01be
PX
2505/*
2506 * Returns:
2507 *
2508 * - 0: if pud leaf changed from under us
2509 * - 1: if pud can be skipped
2510 * - HPAGE_PUD_NR: if pud was successfully processed
2511 */
2512#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2513int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2514 pud_t *pudp, unsigned long addr, pgprot_t newprot,
2515 unsigned long cp_flags)
2516{
2517 struct mm_struct *mm = vma->vm_mm;
2518 pud_t oldpud, entry;
2519 spinlock_t *ptl;
2520
2521 tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2522
2523 /* NUMA balancing doesn't apply to dax */
2524 if (cp_flags & MM_CP_PROT_NUMA)
2525 return 1;
2526
2527 /*
2528 * Huge userfault-wp entries only work with anonymous memory, and we
2529 * don't have anonymous PUDs yet.
2530 */
2531 if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2532 return 1;
2533
2534 ptl = __pud_trans_huge_lock(pudp, vma);
2535 if (!ptl)
2536 return 0;
2537
2538 /*
2539 * Can't clear PUD or it can race with concurrent zapping. See
2540 * change_huge_pmd().
2541 */
2542 oldpud = pudp_invalidate(vma, addr, pudp);
2543 entry = pud_modify(oldpud, newprot);
2544 set_pud_at(mm, addr, pudp, entry);
2545 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2546
2547 spin_unlock(ptl);
2548 return HPAGE_PUD_NR;
2549}
2550#endif
2551
adef4406
AA
2552#ifdef CONFIG_USERFAULTFD
2553/*
867a43a3 2554 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
adef4406
AA
2555 * the caller, but it must return after releasing the page_table_lock.
2556 * Just move the page from src_pmd to dst_pmd if possible.
2557 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2558 * repeated by the caller, or other errors in case of failure.
2559 */
2560int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2561 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2562 unsigned long dst_addr, unsigned long src_addr)
2563{
2564 pmd_t _dst_pmd, src_pmdval;
2565 struct page *src_page;
2566 struct folio *src_folio;
2567 struct anon_vma *src_anon_vma;
2568 spinlock_t *src_ptl, *dst_ptl;
2569 pgtable_t src_pgtable;
2570 struct mmu_notifier_range range;
2571 int err = 0;
2572
2573 src_pmdval = *src_pmd;
2574 src_ptl = pmd_lockptr(mm, src_pmd);
2575
2576 lockdep_assert_held(src_ptl);
867a43a3
LG
2577 vma_assert_locked(src_vma);
2578 vma_assert_locked(dst_vma);
adef4406
AA
2579
2580 /* Sanity checks before the operation */
2581 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2582 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2583 spin_unlock(src_ptl);
2584 return -EINVAL;
2585 }
2586
2587 if (!pmd_trans_huge(src_pmdval)) {
2588 spin_unlock(src_ptl);
2589 if (is_pmd_migration_entry(src_pmdval)) {
2590 pmd_migration_entry_wait(mm, &src_pmdval);
2591 return -EAGAIN;
2592 }
2593 return -ENOENT;
2594 }
2595
2596 src_page = pmd_page(src_pmdval);
adef4406 2597
eb1521da
SB
2598 if (!is_huge_zero_pmd(src_pmdval)) {
2599 if (unlikely(!PageAnonExclusive(src_page))) {
2600 spin_unlock(src_ptl);
2601 return -EBUSY;
2602 }
2603
2604 src_folio = page_folio(src_page);
2605 folio_get(src_folio);
2606 } else
2607 src_folio = NULL;
2608
adef4406
AA
2609 spin_unlock(src_ptl);
2610
2611 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2612 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2613 src_addr + HPAGE_PMD_SIZE);
2614 mmu_notifier_invalidate_range_start(&range);
2615
eb1521da
SB
2616 if (src_folio) {
2617 folio_lock(src_folio);
adef4406 2618
eb1521da
SB
2619 /*
2620 * split_huge_page walks the anon_vma chain without the page
2621 * lock. Serialize against it with the anon_vma lock, the page
2622 * lock is not enough.
2623 */
2624 src_anon_vma = folio_get_anon_vma(src_folio);
2625 if (!src_anon_vma) {
2626 err = -EAGAIN;
2627 goto unlock_folio;
2628 }
2629 anon_vma_lock_write(src_anon_vma);
2630 } else
2631 src_anon_vma = NULL;
adef4406
AA
2632
2633 dst_ptl = pmd_lockptr(mm, dst_pmd);
2634 double_pt_lock(src_ptl, dst_ptl);
2635 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2636 !pmd_same(*dst_pmd, dst_pmdval))) {
2637 err = -EAGAIN;
2638 goto unlock_ptls;
2639 }
eb1521da
SB
2640 if (src_folio) {
2641 if (folio_maybe_dma_pinned(src_folio) ||
2642 !PageAnonExclusive(&src_folio->page)) {
2643 err = -EBUSY;
2644 goto unlock_ptls;
2645 }
adef4406 2646
eb1521da
SB
2647 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2648 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2649 err = -EBUSY;
2650 goto unlock_ptls;
2651 }
adef4406 2652
eb1521da
SB
2653 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2654 /* Folio got pinned from under us. Put it back and fail the move. */
2655 if (folio_maybe_dma_pinned(src_folio)) {
2656 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2657 err = -EBUSY;
2658 goto unlock_ptls;
2659 }
adef4406 2660
c0205eaf 2661 folio_move_anon_rmap(src_folio, dst_vma);
b5ba3a64 2662 src_folio->index = linear_page_index(dst_vma, dst_addr);
c0205eaf 2663
e3981db4 2664 _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
eb1521da
SB
2665 /* Follow mremap() behavior and treat the entry dirty after the move */
2666 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2667 } else {
2668 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
e3981db4 2669 _dst_pmd = folio_mk_pmd(page_folio(src_page), dst_vma->vm_page_prot);
eb1521da 2670 }
adef4406
AA
2671 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2672
2673 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2674 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2675unlock_ptls:
2676 double_pt_unlock(src_ptl, dst_ptl);
eb1521da
SB
2677 if (src_anon_vma) {
2678 anon_vma_unlock_write(src_anon_vma);
2679 put_anon_vma(src_anon_vma);
2680 }
adef4406
AA
2681unlock_folio:
2682 /* unblock rmap walks */
eb1521da
SB
2683 if (src_folio)
2684 folio_unlock(src_folio);
adef4406 2685 mmu_notifier_invalidate_range_end(&range);
eb1521da
SB
2686 if (src_folio)
2687 folio_put(src_folio);
adef4406
AA
2688 return err;
2689}
2690#endif /* CONFIG_USERFAULTFD */
2691
025c5b24 2692/*
8f19b0c0 2693 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
025c5b24 2694 *
8f19b0c0
YH
2695 * Note that if it returns page table lock pointer, this routine returns without
2696 * unlocking page table lock. So callers must unlock it.
025c5b24 2697 */
b6ec57f4 2698spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
025c5b24 2699{
b6ec57f4
KS
2700 spinlock_t *ptl;
2701 ptl = pmd_lock(vma->vm_mm, pmd);
84c3fc4e
ZY
2702 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2703 pmd_devmap(*pmd)))
b6ec57f4
KS
2704 return ptl;
2705 spin_unlock(ptl);
2706 return NULL;
cd7548ab
JW
2707}
2708
a00cc7d9 2709/*
d965e390 2710 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
a00cc7d9 2711 *
d965e390
ML
2712 * Note that if it returns page table lock pointer, this routine returns without
2713 * unlocking page table lock. So callers must unlock it.
a00cc7d9
MW
2714 */
2715spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2716{
2717 spinlock_t *ptl;
2718
2719 ptl = pud_lock(vma->vm_mm, pud);
2720 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2721 return ptl;
2722 spin_unlock(ptl);
2723 return NULL;
2724}
2725
2726#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
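/*
 * Zap a huge pud: clear the entry and queue the TLB flush.  Normal folios
 * also have their rmap and file RSS accounting dropped before the page is
 * handed to the mmu_gather; anonymous PUDs are not supported.
 */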
2727int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2728 pud_t *pud, unsigned long addr)
2729{
a00cc7d9 2730 spinlock_t *ptl;
1c399e74 2731 pud_t orig_pud;
a00cc7d9
MW
2732
2733 ptl = __pud_trans_huge_lock(pud, vma);
2734 if (!ptl)
2735 return 0;
74929079 2736
1c399e74
PX
2737 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2738 arch_check_zapped_pud(vma, orig_pud);
a00cc7d9 2739 tlb_remove_pud_tlb_entry(tlb, pud, addr);
dbe54153 2740 if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
a00cc7d9
MW
2741 spin_unlock(ptl);
2742 /* No zero page support yet */
2743 } else {
dbe54153
AP
2744 struct page *page = NULL;
2745 struct folio *folio;
2746
2747 /* No support for anonymous PUD pages or migration yet */
2748 VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
2749 !pud_present(orig_pud));
2750
2751 page = pud_page(orig_pud);
2752 folio = page_folio(page);
2753 folio_remove_rmap_pud(folio, page, vma);
2754 add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
2755
2756 spin_unlock(ptl);
2757 tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
a00cc7d9
MW
2758 }
2759 return 1;
2760}
2761
2762static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2763 unsigned long haddr)
2764{
dbe54153
AP
2765 struct folio *folio;
2766 struct page *page;
2767 pud_t old_pud;
2768
a00cc7d9
MW
2769 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2770 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2771 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2772 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2773
ce9311cf 2774 count_vm_event(THP_SPLIT_PUD);
a00cc7d9 2775
dbe54153
AP
2776 old_pud = pudp_huge_clear_flush(vma, haddr, pud);
2777
2778 if (!vma_is_dax(vma))
2779 return;
2780
2781 page = pud_page(old_pud);
2782 folio = page_folio(page);
2783
2784 if (!folio_test_dirty(folio) && pud_dirty(old_pud))
2785 folio_mark_dirty(folio);
2786 if (!folio_test_referenced(folio) && pud_young(old_pud))
2787 folio_set_referenced(folio);
2788 folio_remove_rmap_pud(folio, page, vma);
2789 folio_put(folio);
2790 add_mm_counter(vma->vm_mm, mm_counter_file(folio),
2791 -HPAGE_PUD_NR);
a00cc7d9
MW
2792}
2793
2794void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2795 unsigned long address)
2796{
2797 spinlock_t *ptl;
ac46d4f3 2798 struct mmu_notifier_range range;
a00cc7d9 2799
7d4a8be0 2800 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2801 address & HPAGE_PUD_MASK,
ac46d4f3
JG
2802 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2803 mmu_notifier_invalidate_range_start(&range);
2804 ptl = pud_lock(vma->vm_mm, pud);
a00cc7d9
MW
2805 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2806 goto out;
ac46d4f3 2807 __split_huge_pud_locked(vma, pud, range.start);
a00cc7d9
MW
2808
2809out:
2810 spin_unlock(ptl);
ec8832d0 2811 mmu_notifier_invalidate_range_end(&range);
a00cc7d9 2812}
cb0f01be
PX
2813#else
2814void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2815 unsigned long address)
2816{
2817}
a00cc7d9
MW
2818#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2819
eef1b3ba
KS
2820static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2821 unsigned long haddr, pmd_t *pmd)
2822{
2823 struct mm_struct *mm = vma->vm_mm;
2824 pgtable_t pgtable;
42b2af2c 2825 pmd_t _pmd, old_pmd;
c9c1ee20
HD
2826 unsigned long addr;
2827 pte_t *pte;
eef1b3ba
KS
2828 int i;
2829
0f10851e
JG
2830 /*
2831 * Leave the pmd empty until the ptes are filled. Note that it is fine
2832 * to delay notification until mmu_notifier_invalidate_range_end(), as
2833 * we are replacing a write-protected zero page mapped by a pmd with
2834 * write-protected zero pages mapped by ptes.
2835 *
ee65728e 2836 * See Documentation/mm/mmu_notifier.rst
0f10851e 2837 */
42b2af2c 2838 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
eef1b3ba
KS
2839
2840 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2841 pmd_populate(mm, &_pmd, pgtable);
2842
c9c1ee20
HD
2843 pte = pte_offset_map(&_pmd, haddr);
2844 VM_BUG_ON(!pte);
2845 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2846 pte_t entry;
2847
2848 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
eef1b3ba 2849 entry = pte_mkspecial(entry);
42b2af2c
DH
2850 if (pmd_uffd_wp(old_pmd))
2851 entry = pte_mkuffd_wp(entry);
c33c7948 2852 VM_BUG_ON(!pte_none(ptep_get(pte)));
c9c1ee20
HD
2853 set_pte_at(mm, addr, pte, entry);
2854 pte++;
eef1b3ba 2855 }
c9c1ee20 2856 pte_unmap(pte - 1);
eef1b3ba
KS
2857 smp_wmb(); /* make pte visible before pmd */
2858 pmd_populate(mm, pmd, pgtable);
eef1b3ba
KS
2859}
2860
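/*
 * Split one huge pmd into a page table of PTEs, in place, with the pmd lock
 * held.  File-backed pmds are simply zapped and the zero-page pmd is
 * remapped as zero ptes; anonymous pmds are rewritten either as present
 * ptes or, when freezing for folio splitting, as migration entries, while
 * preserving young/dirty/soft-dirty/uffd-wp state and anon-exclusive bits.
 */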
2861static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba988280 2862 unsigned long haddr, bool freeze)
eef1b3ba
KS
2863{
2864 struct mm_struct *mm = vma->vm_mm;
91b2978a 2865 struct folio *folio;
eef1b3ba
KS
2866 struct page *page;
2867 pgtable_t pgtable;
423ac9af 2868 pmd_t old_pmd, _pmd;
292924b2 2869 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
0ccf7f16 2870 bool anon_exclusive = false, dirty = false;
2ac015e2 2871 unsigned long addr;
c9c1ee20 2872 pte_t *pte;
eef1b3ba
KS
2873 int i;
2874
2875 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2876 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2877 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
84c3fc4e
ZY
2878 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2879 && !pmd_devmap(*pmd));
eef1b3ba
KS
2880
2881 count_vm_event(THP_SPLIT_PMD);
2882
d21b9e57 2883 if (!vma_is_anonymous(vma)) {
ec8832d0 2884 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
953c66c2
AK
2885 /*
2886 * We are going to unmap this huge page. So
2887 * just go ahead and zap it
2888 */
2889 if (arch_needs_pgtable_deposit())
2890 zap_deposited_table(mm, pmd);
38607c62 2891 if (!vma_is_dax(vma) && vma_is_special_huge(vma))
d21b9e57 2892 return;
99fa8a48
HD
2893 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2894 swp_entry_t entry;
2895
2896 entry = pmd_to_swp_entry(old_pmd);
439992ff 2897 folio = pfn_swap_entry_folio(entry);
38607c62
AP
2898 } else if (is_huge_zero_pmd(old_pmd)) {
2899 return;
99fa8a48
HD
2900 } else {
2901 page = pmd_page(old_pmd);
a8e61d58
DH
2902 folio = page_folio(page);
2903 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
db44c658 2904 folio_mark_dirty(folio);
a8e61d58
DH
2905 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2906 folio_set_referenced(folio);
2907 folio_remove_rmap_pmd(folio, page, vma);
2908 folio_put(folio);
99fa8a48 2909 }
6b27cc6c 2910 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
eef1b3ba 2911 return;
99fa8a48
HD
2912 }
2913
3b77e8c8 2914 if (is_huge_zero_pmd(*pmd)) {
4645b9fe
JG
2915 /*
2916 * FIXME: Do we want to invalidate secondary mmu by calling
1af5a810
AP
2917 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2918 * inside __split_huge_pmd() ?
4645b9fe
JG
2919 *
2920 * We are going from a write-protected huge zero page to
2921 * write-protected small zero pages, so it does not seem useful
2922 * to invalidate the secondary mmu at this time.
2923 */
eef1b3ba
KS
2924 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2925 }
2926
3a5a8d34 2927 pmd_migration = is_pmd_migration_entry(*pmd);
2e83ee1d 2928 if (unlikely(pmd_migration)) {
84c3fc4e
ZY
2929 swp_entry_t entry;
2930
3a5a8d34 2931 old_pmd = *pmd;
423ac9af 2932 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2933 page = pfn_swap_entry_to_page(entry);
4dd845b5 2934 write = is_writable_migration_entry(entry);
6c287605
DH
2935 if (PageAnon(page))
2936 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2e346877
PX
2937 young = is_migration_entry_young(entry);
2938 dirty = is_migration_entry_dirty(entry);
2e83ee1d 2939 soft_dirty = pmd_swp_soft_dirty(old_pmd);
f45ec5ff 2940 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2e83ee1d 2941 } else {
3a5a8d34
RR
2942 /*
2943 * Up to this point the pmd is present and huge and userland has
2944 * full access to the hugepage during the split (which
2945 * happens in place). If we overwrite the pmd with the not-huge
2946 * version pointing to the pte here (which of course we could if
2947 * all CPUs were bug free), userland could trigger a small page
2948 * size TLB miss on the small sized TLB while the hugepage TLB
2949 * entry is still established in the huge TLB. Some CPUs don't
2950 * like that. See
2951 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2952 * 383 on page 105. Intel should be safe but also warns that
2953 * it's only safe if the permission and cache attributes of the
2954 * two entries loaded in the two TLBs are identical (which should
2955 * be the case here). But it is generally safer to never allow
2956 * small and huge TLB entries for the same virtual address to be
2957 * loaded simultaneously. So instead of doing "pmd_populate();
2958 * flush_pmd_tlb_range();" we first mark the current pmd
2959 * notpresent (atomically because here the pmd_trans_huge must
2960 * remain set at all times on the pmd until the split is
2961 * complete for this pmd), then we flush the SMP TLB and finally
2962 * we write the non-huge version of the pmd entry with
2963 * pmd_populate.
2964 */
2965 old_pmd = pmdp_invalidate(vma, haddr, pmd);
423ac9af 2966 page = pmd_page(old_pmd);
91b2978a 2967 folio = page_folio(page);
0ccf7f16
PX
2968 if (pmd_dirty(old_pmd)) {
2969 dirty = true;
91b2978a 2970 folio_set_dirty(folio);
0ccf7f16 2971 }
2e83ee1d
PX
2972 write = pmd_write(old_pmd);
2973 young = pmd_young(old_pmd);
2974 soft_dirty = pmd_soft_dirty(old_pmd);
292924b2 2975 uffd_wp = pmd_uffd_wp(old_pmd);
6c287605 2976
91b2978a
DH
2977 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2978 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
6c287605
DH
2979
2980 /*
2981 * Without "freeze", we'll simply split the PMD, propagating the
2982 * PageAnonExclusive() flag for each PTE by setting it for
2983 * each subpage -- no need to (temporarily) clear.
2984 *
2985 * With "freeze" we want to replace mapped pages by
2986 * migration entries right away. This is only possible if we
2987 * managed to clear PageAnonExclusive() -- see
2988 * set_pmd_migration_entry().
2989 *
2990 * In case we cannot clear PageAnonExclusive(), split the PMD
2991 * only and let try_to_migrate_one() fail later.
088b8aa5 2992 *
e3b4b137 2993 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
6c287605 2994 */
91b2978a 2995 anon_exclusive = PageAnonExclusive(page);
e3b4b137
DH
2996 if (freeze && anon_exclusive &&
2997 folio_try_share_anon_rmap_pmd(folio, page))
6c287605 2998 freeze = false;
91b2978a
DH
2999 if (!freeze) {
3000 rmap_t rmap_flags = RMAP_NONE;
3001
3002 folio_ref_add(folio, HPAGE_PMD_NR - 1);
3003 if (anon_exclusive)
3004 rmap_flags |= RMAP_EXCLUSIVE;
3005 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
3006 vma, haddr, rmap_flags);
3007 }
2e83ee1d 3008 }
eef1b3ba 3009
423ac9af
AK
3010 /*
3011 * Withdraw the table only after we mark the pmd entry invalid.
3012 * This is critical for some architectures (Power).
3013 */
eef1b3ba
KS
3014 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
3015 pmd_populate(mm, &_pmd, pgtable);
3016
c9c1ee20
HD
3017 pte = pte_offset_map(&_pmd, haddr);
3018 VM_BUG_ON(!pte);
2bdba986
RR
3019
3020 /*
3021 * Note that NUMA hinting access restrictions are not transferred to
3022 * avoid any possibility of altering permissions across VMAs.
3023 */
3024 if (freeze || pmd_migration) {
3025 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
3026 pte_t entry;
ba988280 3027 swp_entry_t swp_entry;
2bdba986 3028
4dd845b5
AP
3029 if (write)
3030 swp_entry = make_writable_migration_entry(
3031 page_to_pfn(page + i));
6c287605
DH
3032 else if (anon_exclusive)
3033 swp_entry = make_readable_exclusive_migration_entry(
3034 page_to_pfn(page + i));
4dd845b5
AP
3035 else
3036 swp_entry = make_readable_migration_entry(
3037 page_to_pfn(page + i));
2e346877
PX
3038 if (young)
3039 swp_entry = make_migration_entry_young(swp_entry);
3040 if (dirty)
3041 swp_entry = make_migration_entry_dirty(swp_entry);
ba988280 3042 entry = swp_entry_to_pte(swp_entry);
804dd150
AA
3043 if (soft_dirty)
3044 entry = pte_swp_mksoft_dirty(entry);
f45ec5ff
PX
3045 if (uffd_wp)
3046 entry = pte_swp_mkuffd_wp(entry);
2bdba986
RR
3047
3048 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3049 set_pte_at(mm, addr, pte + i, entry);
ba988280 3050 }
2bdba986
RR
3051 } else {
3052 pte_t entry;
3053
3054 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
3055 if (write)
3056 entry = pte_mkwrite(entry, vma);
3057 if (!young)
3058 entry = pte_mkold(entry);
3059 /* NOTE: this may set soft-dirty too on some archs */
3060 if (dirty)
3061 entry = pte_mkdirty(entry);
3062 if (soft_dirty)
3063 entry = pte_mksoft_dirty(entry);
3064 if (uffd_wp)
3065 entry = pte_mkuffd_wp(entry);
3066
3067 for (i = 0; i < HPAGE_PMD_NR; i++)
3068 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
3069
3070 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
eef1b3ba 3071 }
2bdba986 3072 pte_unmap(pte);
eef1b3ba 3073
cb67f428 3074 if (!pmd_migration)
a8e61d58 3075 folio_remove_rmap_pmd(folio, page, vma);
96d82deb
HD
3076 if (freeze)
3077 put_page(page);
eef1b3ba
KS
3078
3079 smp_wmb(); /* make pte visible before pmd */
3080 pmd_populate(mm, pmd, pgtable);
3081}
3082
29e847d2 3083void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
b960818d 3084 pmd_t *pmd, bool freeze)
29e847d2 3085{
29e847d2 3086 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
60fbb143
GG
3087 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
3088 is_pmd_migration_entry(*pmd))
29e847d2 3089 __split_huge_pmd_locked(vma, pmd, address, freeze);
29e847d2
LY
3090}
3091
eef1b3ba 3092void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
b960818d 3093 unsigned long address, bool freeze)
eef1b3ba
KS
3094{
3095 spinlock_t *ptl;
ac46d4f3 3096 struct mmu_notifier_range range;
eef1b3ba 3097
7d4a8be0 3098 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 3099 address & HPAGE_PMD_MASK,
ac46d4f3
JG
3100 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
3101 mmu_notifier_invalidate_range_start(&range);
3102 ptl = pmd_lock(vma->vm_mm, pmd);
b960818d 3103 split_huge_pmd_locked(vma, range.start, pmd, freeze);
eef1b3ba 3104 spin_unlock(ptl);
ec8832d0 3105 mmu_notifier_invalidate_range_end(&range);
eef1b3ba
KS
3106}
3107
fec89c10 3108void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
b960818d 3109 bool freeze)
94fcc585 3110{
50722804 3111 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
94fcc585 3112
50722804 3113 if (!pmd)
f72e7dcd
HD
3114 return;
3115
b960818d 3116 __split_huge_pmd(vma, pmd, address, freeze);
94fcc585
AA
3117}
3118
71f9e58e
ML
3119static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
3120{
3121 /*
3122 * If the new address isn't hpage aligned and it could previously
3123 * contain a hugepage: check if we need to split a huge pmd.
3124 */
3125 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
3126 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
3127 ALIGN(address, HPAGE_PMD_SIZE)))
b960818d 3128 split_huge_pmd_address(vma, address, false);
71f9e58e
ML
3129}
3130
e1b9996b 3131void vma_adjust_trans_huge(struct vm_area_struct *vma,
c372473a
LS
3132 unsigned long start,
3133 unsigned long end,
3134 struct vm_area_struct *next)
94fcc585 3135{
71f9e58e
ML
3136 /* Check if we need to split start first. */
3137 split_huge_pmd_if_needed(vma, start);
94fcc585 3138
71f9e58e
ML
3139 /* Check if we need to split end next. */
3140 split_huge_pmd_if_needed(vma, end);
94fcc585 3141
c372473a
LS
3142 /* If we're incrementing next->vm_start, we might need to split it. */
3143 if (next)
3144 split_huge_pmd_if_needed(next, end);
94fcc585 3145}
e9b61f19 3146
684555aa 3147static void unmap_folio(struct folio *folio)
e9b61f19 3148{
319a624e
ZY
3149 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
3150 TTU_BATCH_FLUSH;
e9b61f19 3151
684555aa 3152 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 3153
319a624e
ZY
3154 if (folio_test_pmd_mappable(folio))
3155 ttu_flags |= TTU_SPLIT_HUGE_PMD;
3156
a98a2f0c
AP
3157 /*
3158 * Anon pages need migration entries to preserve them, but file
3159 * pages can simply be left unmapped, then faulted back on demand.
3160 * If that is ever changed (perhaps for mlock), update remap_page().
3161 */
4b8554c5
MWO
3162 if (folio_test_anon(folio))
3163 try_to_migrate(folio, ttu_flags);
a98a2f0c 3164 else
869f7ee6 3165 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3027c6f8
BW
3166
3167 try_to_unmap_flush();
e9b61f19
KS
3168}
3169
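/*
 * Try to free a clean (lazyfree) anonymous PMD-mapped folio in place rather
 * than splitting it: clear the pmd, then re-check dirty state and the
 * refcount against concurrent GUP-fast; if the folio was redirtied or has
 * unexpected references, restore the pmd and report failure.
 */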
735ecdfa
LY
3170static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
3171 unsigned long addr, pmd_t *pmdp,
3172 struct folio *folio)
3173{
3174 struct mm_struct *mm = vma->vm_mm;
3175 int ref_count, map_count;
3176 pmd_t orig_pmd = *pmdp;
735ecdfa 3177
2f9b43d6
BS
3178 if (pmd_dirty(orig_pmd))
3179 folio_set_dirty(folio);
3180 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3181 folio_set_swapbacked(folio);
735ecdfa 3182 return false;
2f9b43d6 3183 }
735ecdfa
LY
3184
3185 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
3186
3187 /*
3188 * Syncing against concurrent GUP-fast:
3189 * - clear PMD; barrier; read refcount
3190 * - inc refcount; barrier; read PMD
3191 */
3192 smp_mb();
3193
3194 ref_count = folio_ref_count(folio);
3195 map_count = folio_mapcount(folio);
3196
3197 /*
3198 * Order reads for folio refcount and dirty flag
3199 * (see comments in __remove_mapping()).
3200 */
3201 smp_rmb();
3202
3203 /*
3204 * If the folio or its PMD is redirtied at this point, or if there
3205 * are unexpected references, we will give up discarding this folio
3206 * and remap it.
3207 *
3208 * The only folio refs must be one from isolation plus the rmap(s).
3209 */
2f9b43d6
BS
3210 if (pmd_dirty(orig_pmd))
3211 folio_set_dirty(folio);
3212 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
3213 folio_set_swapbacked(folio);
3214 set_pmd_at(mm, addr, pmdp, orig_pmd);
3215 return false;
3216 }
3217
3218 if (ref_count != map_count + 1) {
735ecdfa
LY
3219 set_pmd_at(mm, addr, pmdp, orig_pmd);
3220 return false;
3221 }
3222
d40f74ab 3223 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
735ecdfa
LY
3224 zap_deposited_table(mm, pmdp);
3225 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3226 if (vma->vm_flags & VM_LOCKED)
3227 mlock_drain_local();
3228 folio_put(folio);
3229
3230 return true;
3231}
3232
3233bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3234 pmd_t *pmdp, struct folio *folio)
3235{
3236 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3237 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2f9b43d6
BS
3238 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3239 VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
735ecdfa
LY
3240 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3241
2f9b43d6 3242 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
735ecdfa
LY
3243}
3244
b1f20206 3245static void remap_page(struct folio *folio, unsigned long nr, int flags)
e9b61f19 3246{
4eecb8b9 3247 int i = 0;
ab02c252 3248
684555aa 3249 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
4eecb8b9 3250 if (!folio_test_anon(folio))
ab02c252 3251 return;
4eecb8b9 3252 for (;;) {
b1f20206 3253 remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
4eecb8b9
MWO
3254 i += folio_nr_pages(folio);
3255 if (i >= nr)
3256 break;
3257 folio = folio_next(folio);
ace71a19 3258 }
e9b61f19
KS
3259}
3260
8defffa4 3261static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
88dcb9a3
AS
3262 struct lruvec *lruvec, struct list_head *list)
3263{
8defffa4 3264 VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
6168d0da 3265 lockdep_assert_held(&lruvec->lru_lock);
88dcb9a3 3266
6dbb5741 3267 if (list) {
88dcb9a3 3268 /* page reclaim is reclaiming a huge page */
cb29e794 3269 VM_WARN_ON(folio_test_lru(folio));
8defffa4
MWO
3270 folio_get(new_folio);
3271 list_add_tail(&new_folio->lru, list);
88dcb9a3 3272 } else {
6dbb5741 3273 /* head is still on lru (and we have it frozen) */
cb29e794
MWO
3274 VM_WARN_ON(!folio_test_lru(folio));
3275 if (folio_test_unevictable(folio))
8defffa4 3276 new_folio->mlock_count = 0;
07ca7606 3277 else
8defffa4
MWO
3278 list_add_tail(&new_folio->lru, &folio->lru);
3279 folio_set_lru(new_folio);
88dcb9a3
AS
3280 }
3281}
3282
b8f593cd 3283/* Racy check whether the huge page can be split */
8710f6ed 3284bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
b8f593cd
YH
3285{
3286 int extra_pins;
3287
aa5dc07f 3288 /* Additional pins from page cache */
d4b4084a
MWO
3289 if (folio_test_anon(folio))
3290 extra_pins = folio_test_swapcache(folio) ?
3291 folio_nr_pages(folio) : 0;
b8f593cd 3292 else
d4b4084a 3293 extra_pins = folio_nr_pages(folio);
b8f593cd
YH
3294 if (pextra_pins)
3295 *pextra_pins = extra_pins;
8710f6ed
DH
3296 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
3297 caller_pins;
b8f593cd
YH
3298}
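/*
 * Illustrative examples of the arithmetic above: an order-9 pagecache
 * folio carries 512 page cache references, so it may only be split when
 * folio_ref_count() == folio_mapcount() + 512 + caller_pins. An anon
 * folio that is not in the swap cache has extra_pins == 0, so the same
 * condition reduces to folio_ref_count() == folio_mapcount() + caller_pins.
 */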
3299
00527733
ZY
3300/*
3301 * It splits @folio into @new_order folios and copies the @folio metadata to
3302 * all the resulting folios.
3303 */
3304static void __split_folio_to_order(struct folio *folio, int old_order,
3305 int new_order)
3306{
3307 long new_nr_pages = 1 << new_order;
3308 long nr_pages = 1 << old_order;
3309 long i;
3310
3311 /*
3312	 * Skip the first new_nr_pages, since the new folio made from them
3313	 * already has all the flags of the original folio.
3314 */
3315 for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
3316 struct page *new_head = &folio->page + i;
3317
3318 /*
3319 * Careful: new_folio is not a "real" folio before we cleared PageTail.
3320 * Don't pass it around before clear_compound_head().
3321 */
3322 struct folio *new_folio = (struct folio *)new_head;
3323
3324 VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
3325
3326 /*
3327 * Clone page flags before unfreezing refcount.
3328 *
3329 * After successful get_page_unless_zero() might follow flags change,
3330 * for example lock_page() which set PG_waiters.
3331 *
3332 * Note that for mapped sub-pages of an anonymous THP,
3333 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3334	 * the migration entry instead, from where remap_page() will restore it.
3335 * We can still have PG_anon_exclusive set on effectively unmapped and
3336 * unreferenced sub-pages of an anonymous THP: we can simply drop
3337 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3338 */
3339 new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3340 new_folio->flags |= (folio->flags &
3341 ((1L << PG_referenced) |
3342 (1L << PG_swapbacked) |
3343 (1L << PG_swapcache) |
3344 (1L << PG_mlocked) |
3345 (1L << PG_uptodate) |
3346 (1L << PG_active) |
3347 (1L << PG_workingset) |
3348 (1L << PG_locked) |
3349 (1L << PG_unevictable) |
3350#ifdef CONFIG_ARCH_USES_PG_ARCH_2
3351 (1L << PG_arch_2) |
3352#endif
3353#ifdef CONFIG_ARCH_USES_PG_ARCH_3
3354 (1L << PG_arch_3) |
3355#endif
3356 (1L << PG_dirty) |
3357 LRU_GEN_MASK | LRU_REFS_MASK));
3358
3359 new_folio->mapping = folio->mapping;
3360 new_folio->index = folio->index + i;
3361
3362 /*
3363 * page->private should not be set in tail pages. Fix up and warn once
3364 * if private is unexpectedly set.
3365 */
3366 if (unlikely(new_folio->private)) {
3367 VM_WARN_ON_ONCE_PAGE(true, new_head);
3368 new_folio->private = NULL;
3369 }
3370
3371 if (folio_test_swapcache(folio))
3372 new_folio->swap.val = folio->swap.val + i;
3373
3374 /* Page flags must be visible before we make the page non-compound. */
3375 smp_wmb();
3376
3377 /*
3378 * Clear PageTail before unfreezing page refcount.
3379 *
3380 * After successful get_page_unless_zero() might follow put_page()
3381 * which needs correct compound_head().
3382 */
3383 clear_compound_head(new_head);
3384 if (new_order) {
3385 prep_compound_page(new_head, new_order);
3386 folio_set_large_rmappable(new_folio);
3387 }
3388
3389 if (folio_test_young(folio))
3390 folio_set_young(new_folio);
3391 if (folio_test_idle(folio))
3392 folio_set_idle(new_folio);
fa23a338
MWO
3393#ifdef CONFIG_MEMCG
3394 new_folio->memcg_data = folio->memcg_data;
3395#endif
00527733
ZY
3396
3397 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3398 }
3399
3400 if (new_order)
3401 folio_set_order(folio, new_order);
3402 else
3403 ClearPageCompound(&folio->page);
3404}
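/*
 * Worked example (illustrative): splitting an order-9 folio to order-3
 * yields 2^(9-3) = 64 order-3 folios. The loop above starts at page 8 and
 * visits every 8th page up to 504, turning each into the head of a new
 * order-3 folio; pages 0-7 stay behind the original head, which is
 * downgraded to order 3 by folio_set_order() at the end.
 */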
3405
3406/*
3407 * It splits an unmapped @folio into smaller, lower-order folios in one of two ways.
3408 * @folio: the to-be-split folio
3409 * @new_order: the smallest order of the after split folios (since a buddy
3410 *             allocator like split generates folios with orders from @folio's
3411 *             order - 1 down to @new_order).
3412 * @split_at: in buddy allocator like split, the folio containing @split_at
3413 * will be split until its order becomes @new_order.
3414 * @lock_at: the folio containing @lock_at is left locked for caller.
3415 * @list: the after split folios will be added to @list if it is not NULL,
3416 * otherwise to LRU lists.
3417 * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory.
3418 * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
3419 * @mapping: @folio->mapping
3420 * @uniform_split: if the split is uniform or not (buddy allocator like split)
3421 *
3422 *
3423 * 1. uniform split: the given @folio is split into multiple @new_order small folios,
3424 * where all small folios have the same order. This is done when
3425 * uniform_split is true.
3426 * 2. buddy allocator like (non-uniform) split: the given @folio is split in
3427 *    half, and the half containing the given page is split in half again
3428 * until the given @page's order becomes @new_order. This is done when
3429 * uniform_split is false.
3430 *
3431 * The high level flow for these two methods are:
3432 * 1. uniform split: a single __split_folio_to_order() is called to split the
3433 * @folio into @new_order, then we traverse all the resulting folios one by
3434 * one in PFN ascending order and perform stats, unfreeze, adding to list,
3435 * and file mapping index operations.
3436 * 2. non-uniform split: in general, folio_order - @new_order calls to
3437 * __split_folio_to_order() are made in a for loop to split the @folio
3438 * to one lower order at a time. The resulting small folios are processed
3439 *    as described in 1, except for the one containing
3440 *    @page, which is split again in the next loop iteration.
3441 *
3442 * After splitting, the caller's folio reference will be transferred to the
3443 * folio containing @page. The other folios may be freed if they are not mapped.
3444 *
3445 * In terms of locking, after splitting,
3446 * 1. uniform split leaves @page (or the folio that contains it) locked;
3447 * 2. buddy allocator like (non-uniform) split leaves @folio locked.
3448 *
3449 *
3450 * For !uniform_split, when -ENOMEM is returned, the original folio might be
3451 * split. The caller needs to check the input folio.
3452 */
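/*
 * Worked example for the non-uniform case (illustrative, matching the
 * example in the folio_split() comment further below): splitting an
 * order-9 folio at a page in its third order-3 chunk down to new_order 3
 * takes six passes, split_order 8 down to 3. Each pass halves the folio
 * that still contains @split_at; the halves that no longer contain it are
 * unfrozen and released, except the one holding the original head, which
 * stays frozen until the very end. The result in PFN order is
 * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8],
 * with only the bracketed folio containing @split_at.
 */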
3453static int __split_unmapped_folio(struct folio *folio, int new_order,
3454 struct page *split_at, struct page *lock_at,
3455 struct list_head *list, pgoff_t end,
3456 struct xa_state *xas, struct address_space *mapping,
3457 bool uniform_split)
3458{
3459 struct lruvec *lruvec;
3460 struct address_space *swap_cache = NULL;
3461 struct folio *origin_folio = folio;
3462 struct folio *next_folio = folio_next(folio);
3463 struct folio *new_folio;
3464 struct folio *next;
3465 int order = folio_order(folio);
3466 int split_order;
3467 int start_order = uniform_split ? new_order : order - 1;
3468 int nr_dropped = 0;
3469 int ret = 0;
3470 bool stop_split = false;
3471
3472 if (folio_test_swapcache(folio)) {
3473 VM_BUG_ON(mapping);
3474
3475 /* a swapcache folio can only be uniformly split to order-0 */
3476 if (!uniform_split || new_order != 0)
3477 return -EINVAL;
3478
3479 swap_cache = swap_address_space(folio->swap);
3480 xa_lock(&swap_cache->i_pages);
3481 }
3482
3483 if (folio_test_anon(folio))
3484 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
3485
3486 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3487 lruvec = folio_lruvec_lock(folio);
3488
3489 folio_clear_has_hwpoisoned(folio);
3490
3491 /*
3492 * split to new_order one order at a time. For uniform split,
3493 * folio is split to new_order directly.
3494 */
3495 for (split_order = start_order;
3496 split_order >= new_order && !stop_split;
3497 split_order--) {
3498 int old_order = folio_order(folio);
3499 struct folio *release;
3500 struct folio *end_folio = folio_next(folio);
3501
3502		/* order-1 anonymous folios are not supported */
3503 if (folio_test_anon(folio) && split_order == 1)
3504 continue;
3505 if (uniform_split && split_order != new_order)
3506 continue;
3507
3508 if (mapping) {
3509 /*
3510 * uniform split has xas_split_alloc() called before
3511 * irq is disabled to allocate enough memory, whereas
3512 * non-uniform split can handle ENOMEM.
3513 */
3514 if (uniform_split)
3515 xas_split(xas, folio, old_order);
3516 else {
3517 xas_set_order(xas, folio->index, split_order);
3518 xas_try_split(xas, folio, old_order);
3519 if (xas_error(xas)) {
3520 ret = xas_error(xas);
3521 stop_split = true;
3522 goto after_split;
3523 }
3524 }
3525 }
3526
fa23a338 3527 folio_split_memcg_refs(folio, old_order, split_order);
00527733
ZY
3528 split_page_owner(&folio->page, old_order, split_order);
3529 pgalloc_tag_split(folio, old_order, split_order);
3530
3531 __split_folio_to_order(folio, old_order, split_order);
3532
3533after_split:
3534 /*
3535 * Iterate through after-split folios and perform related
3536 * operations. But in buddy allocator like split, the folio
3537 * containing the specified page is skipped until its order
3538 * is new_order, since the folio will be worked on in next
3539 * iteration.
3540 */
3541 for (release = folio; release != end_folio; release = next) {
3542 next = folio_next(release);
3543 /*
3544 * for buddy allocator like split, the folio containing
3545			 * page will be split next and should not be released
3546			 * until the folio's order is new_order or stop_split
3547			 * is set to true by the xas_try_split() failure above.
3548 */
3549 if (release == page_folio(split_at)) {
3550 folio = release;
3551 if (split_order != new_order && !stop_split)
3552 continue;
3553 }
3554 if (folio_test_anon(release)) {
3555 mod_mthp_stat(folio_order(release),
3556 MTHP_STAT_NR_ANON, 1);
3557 }
3558
3559 /*
3560			 * origin_folio should be kept frozen until page cache
3561			 * entries are updated with all the other after-split
3562			 * folios, to prevent others from seeing stale page cache
3563 * entries.
3564 */
3565 if (release == origin_folio)
3566 continue;
3567
3568 folio_ref_unfreeze(release, 1 +
3569 ((mapping || swap_cache) ?
3570 folio_nr_pages(release) : 0));
3571
8defffa4
MWO
3572 lru_add_split_folio(origin_folio, release, lruvec,
3573 list);
00527733
ZY
3574
3575 /* Some pages can be beyond EOF: drop them from cache */
3576 if (release->index >= end) {
3577 if (shmem_mapping(mapping))
3578 nr_dropped += folio_nr_pages(release);
3579 else if (folio_test_clear_dirty(release))
3580 folio_account_cleaned(release,
3581 inode_to_wb(mapping->host));
3582 __filemap_remove_folio(release, NULL);
3583 folio_put_refs(release, folio_nr_pages(release));
3584 } else if (mapping) {
3585 __xa_store(&mapping->i_pages,
3586 release->index, release, 0);
3587 } else if (swap_cache) {
3588 __xa_store(&swap_cache->i_pages,
3589 swap_cache_index(release->swap),
3590 release, 0);
3591 }
3592 }
3593 }
3594
3595 /*
3596 * Unfreeze origin_folio only after all page cache entries, which used
3597 * to point to it, have been updated with new folios. Otherwise,
3598 * a parallel folio_try_get() can grab origin_folio and its caller can
3599 * see stale page cache entries.
3600 */
3601 folio_ref_unfreeze(origin_folio, 1 +
3602 ((mapping || swap_cache) ? folio_nr_pages(origin_folio) : 0));
3603
3604 unlock_page_lruvec(lruvec);
3605
3606 if (swap_cache)
3607 xa_unlock(&swap_cache->i_pages);
3608 if (mapping)
3609 xa_unlock(&mapping->i_pages);
3610
3611 /* Caller disabled irqs, so they are still disabled here */
3612 local_irq_enable();
3613
3614 if (nr_dropped)
3615 shmem_uncharge(mapping->host, nr_dropped);
3616
3617 remap_page(origin_folio, 1 << order,
3618 folio_test_anon(origin_folio) ?
3619 RMP_USE_SHARED_ZEROPAGE : 0);
3620
3621 /*
3622 * At this point, folio should contain the specified page.
3623	 * For uniform split, it is left for the caller to unlock.
3624	 * For buddy allocator like split, the first after-split folio is left
3625	 * for the caller to unlock.
3626 */
3627 for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) {
3628 next = folio_next(new_folio);
3629 if (new_folio == page_folio(lock_at))
3630 continue;
3631
3632 folio_unlock(new_folio);
3633 /*
3634		 * Subpages may be freed if there wasn't any mapping,
3635		 * e.g. if add_to_swap() is running on an LRU page that
3636		 * had its mapping zapped. Freeing these pages
3637		 * requires taking the lru_lock, so we do the put_page
3638		 * of the tail pages after the split is complete.
3639 */
06340b92 3640 free_folio_and_swap_cache(new_folio);
00527733
ZY
3641 }
3642 return ret;
3643}
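/*
 * Summary of the exit state (descriptive): remap_page() has restored the
 * migration entries of anon folios, and every after-split folio except the
 * one containing @lock_at has been unlocked and has had the temporary
 * split reference dropped via free_folio_and_swap_cache(), so unmapped
 * pieces may already be freed by the time this function returns.
 */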
3644
7460b470 3645bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
58729c04
ZY
3646 bool warns)
3647{
3648 if (folio_test_anon(folio)) {
3649 /* order-1 is not supported for anonymous THP. */
3650 VM_WARN_ONCE(warns && new_order == 1,
3651 "Cannot split to order-1 folio");
3652 return new_order != 1;
3653 } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3654 !mapping_large_folio_support(folio->mapping)) {
3655 /*
3656		 * No split if the file system does not support large folios.
3657 * Note that we might still have THPs in such mappings due to
3658 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
3659 * does not actually support large folios properly.
3660 */
3661 VM_WARN_ONCE(warns,
3662 "Cannot split file folio to non-0 order");
3663 return false;
3664 }
3665
3666 /* Only swapping a whole PMD-mapped folio is supported */
3667 if (folio_test_swapcache(folio)) {
3668 VM_WARN_ONCE(warns,
3669 "Cannot split swapcache folio to non-0 order");
3670 return false;
3671 }
3672
3673 return true;
3674}
3675
3676/* See comments in non_uniform_split_supported() */
7460b470 3677bool uniform_split_supported(struct folio *folio, unsigned int new_order,
58729c04
ZY
3678 bool warns)
3679{
3680 if (folio_test_anon(folio)) {
3681 VM_WARN_ONCE(warns && new_order == 1,
3682 "Cannot split to order-1 folio");
3683 return new_order != 1;
3684 } else if (new_order) {
3685 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3686 !mapping_large_folio_support(folio->mapping)) {
3687 VM_WARN_ONCE(warns,
3688 "Cannot split file folio to non-0 order");
3689 return false;
3690 }
3691 }
3692
3693 if (new_order && folio_test_swapcache(folio)) {
3694 VM_WARN_ONCE(warns,
3695 "Cannot split swapcache folio to non-0 order");
3696 return false;
3697 }
3698
3699 return true;
3700}
3701
3702/*
3703 * __folio_split: split a folio at @split_at to a @new_order folio
3704 * @folio: folio to split
3705 * @new_order: the order of the new folio
3706 * @split_at: a page within the new folio
3707 * @lock_at: a page within @folio to be left locked to caller
3708 * @list: after-split folios will be put on it if non-NULL
3709 * @uniform_split: perform uniform split or not (non-uniform split)
3710 *
3711 * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
3712 * It is in charge of checking whether the split is supported or not and
3713 * preparing @folio for __split_unmapped_folio().
3714 *
3715 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be
3716 * split but not to @new_order, the caller needs to check)
3717 */
6384dd1d 3718static int __folio_split(struct folio *folio, unsigned int new_order,
58729c04
ZY
3719 struct page *split_at, struct page *lock_at,
3720 struct list_head *list, bool uniform_split)
e9b61f19 3721{
f8baa6be 3722 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
58729c04 3723 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
5d65c8d7 3724 bool is_anon = folio_test_anon(folio);
baa355fd 3725 struct address_space *mapping = NULL;
5d65c8d7 3726 struct anon_vma *anon_vma = NULL;
f216c845 3727 int order = folio_order(folio);
504e070d 3728 int extra_pins, ret;
006d3ff2 3729 pgoff_t end;
478d134e 3730 bool is_hzp;
e9b61f19 3731
3e9a13da
MWO
3732 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3733 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 3734
58729c04
ZY
3735 if (folio != page_folio(split_at) || folio != page_folio(lock_at))
3736 return -EINVAL;
3737
1412ecb3
ZY
3738 if (new_order >= folio_order(folio))
3739 return -EINVAL;
3740
58729c04
ZY
3741 if (uniform_split && !uniform_split_supported(folio, new_order, true))
3742 return -EINVAL;
c010d47f 3743
58729c04
ZY
3744 if (!uniform_split &&
3745 !non_uniform_split_supported(folio, new_order, true))
6a50c9b5 3746 return -EINVAL;
c010d47f 3747
5beaee54 3748 is_hzp = is_huge_zero_folio(folio);
4737edbb
NH
3749 if (is_hzp) {
3750 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
478d134e 3751 return -EBUSY;
4737edbb 3752 }
478d134e 3753
3e9a13da 3754 if (folio_test_writeback(folio))
59807685
YH
3755 return -EBUSY;
3756
5d65c8d7 3757 if (is_anon) {
baa355fd 3758 /*
c1e8d7c6 3759 * The caller does not necessarily hold an mmap_lock that would
baa355fd
KS
3760		 * prevent the anon_vma disappearing, so we first take a
3761 * reference to it and then lock the anon_vma for write. This
2f031c6f 3762 * is similar to folio_lock_anon_vma_read except the write lock
baa355fd
KS
3763 * is taken to serialise against parallel split or collapse
3764 * operations.
3765 */
29eea9b5 3766 anon_vma = folio_get_anon_vma(folio);
baa355fd
KS
3767 if (!anon_vma) {
3768 ret = -EBUSY;
3769 goto out;
3770 }
006d3ff2 3771 end = -1;
baa355fd
KS
3772 mapping = NULL;
3773 anon_vma_lock_write(anon_vma);
3774 } else {
e220917f 3775 unsigned int min_order;
6a3edd29
YF
3776 gfp_t gfp;
3777
3e9a13da 3778 mapping = folio->mapping;
baa355fd
KS
3779
3780 /* Truncated ? */
6384dd1d
ZY
3781 /*
3782 * TODO: add support for large shmem folio in swap cache.
3783 * When shmem is in swap cache, mapping is NULL and
3784 * folio_test_swapcache() is true.
3785 */
baa355fd
KS
3786 if (!mapping) {
3787 ret = -EBUSY;
3788 goto out;
3789 }
3790
e220917f
LC
3791 min_order = mapping_min_folio_order(folio->mapping);
3792 if (new_order < min_order) {
3793 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
3794 min_order);
3795 ret = -EINVAL;
3796 goto out;
3797 }
3798
6a3edd29
YF
3799 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3800 GFP_RECLAIM_MASK);
3801
0201ebf2 3802 if (!filemap_release_folio(folio, gfp)) {
6a3edd29
YF
3803 ret = -EBUSY;
3804 goto out;
3805 }
3806
58729c04
ZY
3807 if (uniform_split) {
3808 xas_set_order(&xas, folio->index, new_order);
3809 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3810 if (xas_error(&xas)) {
3811 ret = xas_error(&xas);
3812 goto out;
3813 }
6b24ca4a
MWO
3814 }
3815
baa355fd
KS
3816 anon_vma = NULL;
3817 i_mmap_lock_read(mapping);
006d3ff2
HD
3818
3819 /*
58729c04
ZY
3820		 * __split_unmapped_folio() may need to trim off pages beyond
3821 * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
3822 * seqlock, which cannot be nested inside the page tree lock.
3823 * So note end now: i_size itself may be changed at any moment,
3824 * but folio lock is good enough to serialize the trimming.
006d3ff2
HD
3825 */
3826 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
d144bf62
HD
3827 if (shmem_mapping(mapping))
3828 end = shmem_fallocend(mapping->host, end);
e9b61f19 3829 }
e9b61f19
KS
3830
3831 /*
684555aa 3832 * Racy check if we can split the page, before unmap_folio() will
e9b61f19
KS
3833 * split PMDs
3834 */
8710f6ed 3835 if (!can_split_folio(folio, 1, &extra_pins)) {
fd4a7ac3 3836 ret = -EAGAIN;
e9b61f19
KS
3837 goto out_unlock;
3838 }
3839
684555aa 3840 unmap_folio(folio);
e9b61f19 3841
b6769834
AS
3842 /* block interrupt reentry in xa_lock and spinlock */
3843 local_irq_disable();
baa355fd 3844 if (mapping) {
baa355fd 3845 /*
3e9a13da
MWO
3846 * Check if the folio is present in page cache.
3847 * We assume all tail are present too, if folio is there.
baa355fd 3848 */
6b24ca4a
MWO
3849 xas_lock(&xas);
3850 xas_reset(&xas);
3e9a13da 3851 if (xas_load(&xas) != folio)
baa355fd
KS
3852 goto fail;
3853 }
3854
0139aa7b 3855 /* Prevent deferred_split_scan() touching ->_refcount */
364c1eeb 3856 spin_lock(&ds_queue->split_queue_lock);
3e9a13da 3857 if (folio_ref_freeze(folio, 1 + extra_pins)) {
8897277a
MWO
3858 if (folio_order(folio) > 1 &&
3859 !list_empty(&folio->_deferred_list)) {
364c1eeb 3860 ds_queue->split_queue_len--;
8422acdc 3861 if (folio_test_partially_mapped(folio)) {
42b2eb69 3862 folio_clear_partially_mapped(folio);
8422acdc
UA
3863 mod_mthp_stat(folio_order(folio),
3864 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3865 }
c010d47f
ZY
3866 /*
3867 * Reinitialize page_deferred_list after removing the
3868 * page from the split_queue, otherwise a subsequent
3869 * split will see list corruption when checking the
3870 * page_deferred_list.
3871 */
3872 list_del_init(&folio->_deferred_list);
9a982250 3873 }
afb97172 3874 spin_unlock(&ds_queue->split_queue_lock);
06d3eff6 3875 if (mapping) {
3e9a13da 3876 int nr = folio_nr_pages(folio);
bf9ecead 3877
c010d47f
ZY
3878 if (folio_test_pmd_mappable(folio) &&
3879 new_order < HPAGE_PMD_ORDER) {
a48d5bdc
SR
3880 if (folio_test_swapbacked(folio)) {
3881 __lruvec_stat_mod_folio(folio,
3882 NR_SHMEM_THPS, -nr);
3883 } else {
3884 __lruvec_stat_mod_folio(folio,
3885 NR_FILE_THPS, -nr);
3886 filemap_nr_thps_dec(mapping);
3887 }
1ca7554d 3888 }
06d3eff6
KS
3889 }
3890
58729c04
ZY
3891 ret = __split_unmapped_folio(folio, new_order,
3892 split_at, lock_at, list, end, &xas, mapping,
3893 uniform_split);
e9b61f19 3894 } else {
364c1eeb 3895 spin_unlock(&ds_queue->split_queue_lock);
504e070d
YS
3896fail:
3897 if (mapping)
6b24ca4a 3898 xas_unlock(&xas);
b6769834 3899 local_irq_enable();
b1f20206 3900 remap_page(folio, folio_nr_pages(folio), 0);
fd4a7ac3 3901 ret = -EAGAIN;
e9b61f19
KS
3902 }
3903
3904out_unlock:
baa355fd
KS
3905 if (anon_vma) {
3906 anon_vma_unlock_write(anon_vma);
3907 put_anon_vma(anon_vma);
3908 }
3909 if (mapping)
3910 i_mmap_unlock_read(mapping);
e9b61f19 3911out:
69a37a8b 3912 xas_destroy(&xas);
f216c845 3913 if (order == HPAGE_PMD_ORDER)
835c3a25 3914 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
f216c845 3915 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
e9b61f19
KS
3916 return ret;
3917}
9a982250 3918
6384dd1d
ZY
3919/*
3920 * This function splits a large folio into smaller folios of order @new_order.
3921 * @page can point to any page of the large folio to split. The split operation
3922 * does not change the position of @page.
3923 *
3924 * Prerequisites:
3925 *
3926 * 1) The caller must hold a reference on the @page's owning folio, also known
3927 * as the large folio.
3928 *
3929 * 2) The large folio must be locked.
3930 *
3931 * 3) The folio must not be pinned. Any unexpected folio references, including
3932 * GUP pins, will result in the folio not getting split; instead, the caller
3933 * will receive an -EAGAIN.
3934 *
3935 * 4) @new_order > 1, usually. Splitting to order-1 is not supported for
3936 *    anonymous folios, because folio->_deferred_list, which
3937 * is used by partially mapped folios, is stored in subpage 2, but an order-1
3938 * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
3939 * since they do not use _deferred_list.
3940 *
3941 * After splitting, the caller's folio reference will be transferred to @page,
3942 * resulting in a raised refcount of @page after this call. The other pages may
3943 * be freed if they are not mapped.
3944 *
3945 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3946 *
3947 * Pages in @new_order will inherit the mapping, flags, and so on from the
3948 * huge page.
3949 *
3950 * Returns 0 if the huge page was split successfully.
3951 *
3952 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
3953 * the folio was concurrently removed from the page cache.
3954 *
3955 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3956 * under writeback, if fs-specific folio metadata cannot currently be
3957 * released, or if some unexpected race happened (e.g., anon VMA disappeared,
3958 * truncation).
3959 *
3960 * Callers should ensure that the order respects the address space mapping
3961 * min-order if one is set for non-anonymous folios.
3962 *
3963 * Returns -EINVAL when trying to split to an order that is incompatible
3964 * with the folio. Splitting to order 0 is compatible with all folios.
3965 */
3966int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3967 unsigned int new_order)
3968{
3969 struct folio *folio = page_folio(page);
3970
58729c04
ZY
3971 return __folio_split(folio, new_order, &folio->page, page, list, true);
3972}
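/*
 * Minimal usage sketch (illustrative; it mirrors the debugfs helpers
 * further below, with error handling elided). The caller must already
 * hold a reference on the folio:
 *
 *	if (folio_trylock(folio)) {
 *		err = split_huge_page_to_list_to_order(&folio->page, NULL, 0);
 *		folio_unlock(folio);
 *	}
 *
 * On success, the caller's reference now pins the (here order-0) folio
 * containing @page; the remaining pieces went to the LRU because @list
 * was NULL.
 */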
3973
3974/*
3975 * folio_split: split a folio at @split_at to a @new_order folio
3976 * @folio: folio to split
3977 * @new_order: the order of the new folio
3978 * @split_at: a page within the new folio
3979 *
3980 * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be
3981 * split but not to @new_order, the caller needs to check)
3982 *
3983 * It has the same prerequisites and returns as
3984 * split_huge_page_to_list_to_order().
3985 *
3986 * Split a folio at @split_at to a new_order folio, leaving the
3987 * remaining subpages of the original folio as large as possible. For example,
3988 * in the case of splitting an order-9 folio at its third order-3 subpage to
3989 * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio.
3990 * After the split, there will be a group of folios with different orders and
3991 * the new folio containing @split_at is marked in bracket:
3992 * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
3993 *
3994 * After split, folio is left locked for caller.
3995 */
7460b470 3996int folio_split(struct folio *folio, unsigned int new_order,
58729c04
ZY
3997 struct page *split_at, struct list_head *list)
3998{
3999 return __folio_split(folio, new_order, split_at, &folio->page, list,
4000 false);
6384dd1d
ZY
4001}
4002
e220917f
LC
4003int min_order_for_split(struct folio *folio)
4004{
4005 if (folio_test_anon(folio))
4006 return 0;
4007
4008 if (!folio->mapping) {
4009 if (folio_test_pmd_mappable(folio))
4010 count_vm_event(THP_SPLIT_PAGE_FAILED);
4011 return -EBUSY;
4012 }
4013
4014 return mapping_min_folio_order(folio->mapping);
4015}
4016
4017int split_folio_to_list(struct folio *folio, struct list_head *list)
4018{
4019 int ret = min_order_for_split(folio);
4020
4021 if (ret < 0)
4022 return ret;
4023
4024 return split_huge_page_to_list_to_order(&folio->page, list, ret);
4025}
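/*
 * Note (descriptive): for file-backed folios this helper splits to the
 * mapping's minimum folio order rather than to order 0, so on a filesystem
 * with a min-order constraint the result may still be a large folio.
 * Callers that require order 0 must check folio_order() afterwards.
 */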
4026
f8f931bb
HD
4027/*
4028 * __folio_unqueue_deferred_split() is not to be called directly:
4029 * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
4030 * limits its calls to those folios which may have a _deferred_list for
4031 * queueing THP splits, and that list is (racily observed to be) non-empty.
4032 *
4033 * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
4034 * zero: because even when split_queue_lock is held, a non-empty _deferred_list
4035 * might be in use on deferred_split_scan()'s unlocked on-stack list.
4036 *
4037 * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
4038 * therefore important to unqueue deferred split before changing folio memcg.
4039 */
4040bool __folio_unqueue_deferred_split(struct folio *folio)
9a982250 4041{
8dc4a8f1 4042 struct deferred_split *ds_queue;
9a982250 4043 unsigned long flags;
f8f931bb
HD
4044 bool unqueued = false;
4045
4046 WARN_ON_ONCE(folio_ref_count(folio));
4047 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio));
9a982250 4048
8dc4a8f1
MWO
4049 ds_queue = get_deferred_split_queue(folio);
4050 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
4051 if (!list_empty(&folio->_deferred_list)) {
4052 ds_queue->split_queue_len--;
8422acdc 4053 if (folio_test_partially_mapped(folio)) {
42b2eb69 4054 folio_clear_partially_mapped(folio);
8422acdc
UA
4055 mod_mthp_stat(folio_order(folio),
4056 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4057 }
9bcef597 4058 list_del_init(&folio->_deferred_list);
f8f931bb 4059 unqueued = true;
9a982250 4060 }
8dc4a8f1 4061 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
f8f931bb
HD
4062
4063 return unqueued; /* useful for debug warnings */
9a982250
KS
4064}
4065
8422acdc
UA
4066/* partially_mapped=false won't clear PG_partially_mapped folio flag */
4067void deferred_split_folio(struct folio *folio, bool partially_mapped)
9a982250 4068{
f8baa6be 4069 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
87eaceb3 4070#ifdef CONFIG_MEMCG
8991de90 4071 struct mem_cgroup *memcg = folio_memcg(folio);
87eaceb3 4072#endif
9a982250
KS
4073 unsigned long flags;
4074
8897277a
MWO
4075 /*
4076 * Order 1 folios have no space for a deferred list, but we also
4077 * won't waste much memory by not adding them to the deferred list.
4078 */
4079 if (folio_order(folio) <= 1)
4080 return;
9a982250 4081
81d3ff3c
UA
4082 if (!partially_mapped && !split_underused_thp)
4083 return;
4084
87eaceb3 4085 /*
f8f931bb 4086 * Exclude swapcache: originally to avoid a corrupt deferred split
89ce924f 4087 * queue. Nowadays that is fully prevented by memcg1_swapout();
f8f931bb
HD
4088 * but if page reclaim is already handling the same folio, it is
4089 * unnecessary to handle it again in the shrinker, so excluding
4090 * swapcache here may still be a useful optimization.
87eaceb3 4091 */
8991de90 4092 if (folio_test_swapcache(folio))
87eaceb3
YS
4093 return;
4094
364c1eeb 4095 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
8422acdc
UA
4096 if (partially_mapped) {
4097 if (!folio_test_partially_mapped(folio)) {
42b2eb69 4098 folio_set_partially_mapped(folio);
8422acdc
UA
4099 if (folio_test_pmd_mappable(folio))
4100 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
4101 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
4102 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
4103
4104 }
4105 } else {
4106 /* partially mapped folios cannot become non-partially mapped */
4107 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
4108 }
8991de90 4109 if (list_empty(&folio->_deferred_list)) {
8991de90 4110 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
364c1eeb 4111 ds_queue->split_queue_len++;
87eaceb3
YS
4112#ifdef CONFIG_MEMCG
4113 if (memcg)
8991de90 4114 set_shrinker_bit(memcg, folio_nid(folio),
54d91729 4115 deferred_split_shrinker->id);
87eaceb3 4116#endif
9a982250 4117 }
364c1eeb 4118 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
4119}
4120
4121static unsigned long deferred_split_count(struct shrinker *shrink,
4122 struct shrink_control *sc)
4123{
a3d0a918 4124 struct pglist_data *pgdata = NODE_DATA(sc->nid);
364c1eeb 4125 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
87eaceb3
YS
4126
4127#ifdef CONFIG_MEMCG
4128 if (sc->memcg)
4129 ds_queue = &sc->memcg->deferred_split_queue;
4130#endif
364c1eeb 4131 return READ_ONCE(ds_queue->split_queue_len);
9a982250
KS
4132}
4133
dafff3f4
UA
4134static bool thp_underused(struct folio *folio)
4135{
4136 int num_zero_pages = 0, num_filled_pages = 0;
4137 void *kaddr;
4138 int i;
4139
4140 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
4141 return false;
4142
4143 for (i = 0; i < folio_nr_pages(folio); i++) {
4144 kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
4145 if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
4146 num_zero_pages++;
4147 if (num_zero_pages > khugepaged_max_ptes_none) {
4148 kunmap_local(kaddr);
4149 return true;
4150 }
4151 } else {
4152 /*
4153 * Another path for early exit once the number
4154			 * of non-zero filled pages exceeds the threshold.
4155 */
4156 num_filled_pages++;
4157 if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
4158 kunmap_local(kaddr);
4159 return false;
4160 }
4161 }
4162 kunmap_local(kaddr);
4163 }
4164 return false;
4165}
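/*
 * Illustrative numbers (assuming 4 KiB pages and 2 MiB THPs, so
 * HPAGE_PMD_NR == 512): with the default khugepaged_max_ptes_none of
 * HPAGE_PMD_NR - 1 (511), the first check above makes thp_underused()
 * always return false, so fully mapped but zero-filled THPs are never
 * treated as underused. Tuned down to e.g. 255, a PMD-sized folio is
 * reported underused as soon as 256 zero-filled pages are found, and the
 * scan bails out early once 257 non-zero pages have been seen instead.
 */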
4166
9a982250
KS
4167static unsigned long deferred_split_scan(struct shrinker *shrink,
4168 struct shrink_control *sc)
4169{
a3d0a918 4170 struct pglist_data *pgdata = NODE_DATA(sc->nid);
364c1eeb 4171 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
9a982250 4172 unsigned long flags;
4375a553 4173 LIST_HEAD(list);
e66f3185
HD
4174 struct folio *folio, *next, *prev = NULL;
4175 int split = 0, removed = 0;
9a982250 4176
87eaceb3
YS
4177#ifdef CONFIG_MEMCG
4178 if (sc->memcg)
4179 ds_queue = &sc->memcg->deferred_split_queue;
4180#endif
4181
364c1eeb 4182 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
9a982250 4183 /* Take pin on all head pages to avoid freeing them under us */
4375a553
MWO
4184 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
4185 _deferred_list) {
4186 if (folio_try_get(folio)) {
4187 list_move(&folio->_deferred_list, &list);
e3ae1953 4188 } else {
4375a553 4189 /* We lost race with folio_put() */
8422acdc 4190 if (folio_test_partially_mapped(folio)) {
42b2eb69 4191 folio_clear_partially_mapped(folio);
8422acdc
UA
4192 mod_mthp_stat(folio_order(folio),
4193 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
4194 }
4375a553 4195 list_del_init(&folio->_deferred_list);
364c1eeb 4196 ds_queue->split_queue_len--;
9a982250 4197 }
e3ae1953
KS
4198 if (!--sc->nr_to_scan)
4199 break;
9a982250 4200 }
364c1eeb 4201 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250 4202
4375a553 4203 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
dafff3f4
UA
4204 bool did_split = false;
4205 bool underused = false;
4206
4207 if (!folio_test_partially_mapped(folio)) {
4208 underused = thp_underused(folio);
4209 if (!underused)
4210 goto next;
4211 }
4375a553 4212 if (!folio_trylock(folio))
fa41b900 4213 goto next;
dafff3f4
UA
4214 if (!split_folio(folio)) {
4215 did_split = true;
4216 if (underused)
4217 count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
9a982250 4218 split++;
dafff3f4 4219 }
4375a553 4220 folio_unlock(folio);
fa41b900 4221next:
dafff3f4
UA
4222 /*
4223 * split_folio() removes folio from list on success.
4224 * Only add back to the queue if folio is partially mapped.
4225		 * If thp_underused() returns false, or if split_folio() fails on a
4226		 * folio that was underused, consider the folio used and
4227		 * don't add it back to the split_queue.
4228 */
a3477c9e
HD
4229 if (did_split) {
4230 ; /* folio already removed from list */
4231 } else if (!folio_test_partially_mapped(folio)) {
dafff3f4 4232 list_del_init(&folio->_deferred_list);
e66f3185
HD
4233 removed++;
4234 } else {
4235 /*
4236 * That unlocked list_del_init() above would be unsafe,
4237 * unless its folio is separated from any earlier folios
4238 * left on the list (which may be concurrently unqueued)
4239 * by one safe folio with refcount still raised.
4240 */
4241 swap(folio, prev);
dafff3f4 4242 }
e66f3185
HD
4243 if (folio)
4244 folio_put(folio);
9a982250
KS
4245 }
4246
364c1eeb
YS
4247 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
4248 list_splice_tail(&list, &ds_queue->split_queue);
e66f3185 4249 ds_queue->split_queue_len -= removed;
364c1eeb 4250 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250 4251
e66f3185
HD
4252 if (prev)
4253 folio_put(prev);
4254
cb8d68ec
KS
4255 /*
4256	 * Stop the shrinker if we didn't split any page but the queue is empty.
4257 * This can happen if pages were freed under us.
4258 */
364c1eeb 4259 if (!split && list_empty(&ds_queue->split_queue))
cb8d68ec
KS
4260 return SHRINK_STOP;
4261 return split;
9a982250
KS
4262}
4263
49071d43 4264#ifdef CONFIG_DEBUG_FS
fa6c0231 4265static void split_huge_pages_all(void)
49071d43
KS
4266{
4267 struct zone *zone;
4268 struct page *page;
630e7c5e 4269 struct folio *folio;
49071d43
KS
4270 unsigned long pfn, max_zone_pfn;
4271 unsigned long total = 0, split = 0;
4272
fa6c0231 4273 pr_debug("Split all THPs\n");
a17206da
ML
4274 for_each_zone(zone) {
4275 if (!managed_zone(zone))
4276 continue;
49071d43
KS
4277 max_zone_pfn = zone_end_pfn(zone);
4278 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
a17206da 4279 int nr_pages;
49071d43 4280
2b7aa91b 4281 page = pfn_to_online_page(pfn);
630e7c5e
KW
4282 if (!page || PageTail(page))
4283 continue;
4284 folio = page_folio(page);
4285 if (!folio_try_get(folio))
49071d43
KS
4286 continue;
4287
630e7c5e 4288 if (unlikely(page_folio(page) != folio))
49071d43
KS
4289 goto next;
4290
630e7c5e 4291 if (zone != folio_zone(folio))
49071d43
KS
4292 goto next;
4293
630e7c5e
KW
4294 if (!folio_test_large(folio)
4295 || folio_test_hugetlb(folio)
4296 || !folio_test_lru(folio))
49071d43
KS
4297 goto next;
4298
4299 total++;
630e7c5e
KW
4300 folio_lock(folio);
4301 nr_pages = folio_nr_pages(folio);
4302 if (!split_folio(folio))
49071d43 4303 split++;
a17206da 4304 pfn += nr_pages - 1;
630e7c5e 4305 folio_unlock(folio);
49071d43 4306next:
630e7c5e 4307 folio_put(folio);
fa6c0231 4308 cond_resched();
49071d43
KS
4309 }
4310 }
4311
fa6c0231
ZY
4312 pr_debug("%lu of %lu THP split\n", split, total);
4313}
49071d43 4314
fa6c0231
ZY
4315static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
4316{
4317 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
4318 is_vm_hugetlb_page(vma);
4319}
4320
4321static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
4b94c18d
ZY
4322 unsigned long vaddr_end, unsigned int new_order,
4323 long in_folio_offset)
fa6c0231
ZY
4324{
4325 int ret = 0;
4326 struct task_struct *task;
4327 struct mm_struct *mm;
4328 unsigned long total = 0, split = 0;
4329 unsigned long addr;
4330
4331 vaddr_start &= PAGE_MASK;
4332 vaddr_end &= PAGE_MASK;
4333
e4bfc678 4334 task = find_get_task_by_vpid(pid);
fa6c0231 4335 if (!task) {
fa6c0231
ZY
4336 ret = -ESRCH;
4337 goto out;
4338 }
fa6c0231
ZY
4339
4340 /* Find the mm_struct */
4341 mm = get_task_mm(task);
4342 put_task_struct(task);
4343
4344 if (!mm) {
4345 ret = -EINVAL;
4346 goto out;
4347 }
4348
4349 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
4350 pid, vaddr_start, vaddr_end);
4351
4352 mmap_read_lock(mm);
4353 /*
4354 * always increase addr by PAGE_SIZE, since we could have a PTE page
4355 * table filled with PTE-mapped THPs, each of which is distinct.
4356 */
4357 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
74ba2b38 4358 struct vm_area_struct *vma = vma_lookup(mm, addr);
8710f6ed 4359 struct folio_walk fw;
a644b0ab 4360 struct folio *folio;
e220917f
LC
4361 struct address_space *mapping;
4362 unsigned int target_order = new_order;
fa6c0231 4363
74ba2b38 4364 if (!vma)
fa6c0231
ZY
4365 break;
4366
4367 /* skip special VMA and hugetlb VMA */
4368 if (vma_not_suitable_for_thp_split(vma)) {
4369 addr = vma->vm_end;
4370 continue;
4371 }
4372
8710f6ed
DH
4373 folio = folio_walk_start(&fw, vma, addr, 0);
4374 if (!folio)
fa6c0231
ZY
4375 continue;
4376
a644b0ab 4377 if (!is_transparent_hugepage(folio))
fa6c0231
ZY
4378 goto next;
4379
e220917f
LC
4380 if (!folio_test_anon(folio)) {
4381 mapping = folio->mapping;
4382 target_order = max(new_order,
4383 mapping_min_folio_order(mapping));
4384 }
4385
4386 if (target_order >= folio_order(folio))
2394aef6
ZY
4387 goto next;
4388
fa6c0231 4389 total++;
fc4d1823
ZY
4390 /*
4391		 * For folios with private data, split_huge_page_to_list_to_order()
4392		 * will try to drop it before the split and then check whether the
4393		 * folio can be split. So skip the check here.
4394 */
4395 if (!folio_test_private(folio) &&
8710f6ed 4396 !can_split_folio(folio, 0, NULL))
fa6c0231
ZY
4397 goto next;
4398
a644b0ab 4399 if (!folio_trylock(folio))
fa6c0231 4400 goto next;
8710f6ed
DH
4401 folio_get(folio);
4402 folio_walk_end(&fw, vma);
fa6c0231 4403
e220917f
LC
4404 if (!folio_test_anon(folio) && folio->mapping != mapping)
4405 goto unlock;
4406
4b94c18d
ZY
4407 if (in_folio_offset < 0 ||
4408 in_folio_offset >= folio_nr_pages(folio)) {
4409 if (!split_folio_to_order(folio, target_order))
4410 split++;
4411 } else {
4412 struct page *split_at = folio_page(folio,
4413 in_folio_offset);
4414 if (!folio_split(folio, target_order, split_at, NULL))
4415 split++;
4416 }
fa6c0231 4417
e220917f
LC
4418unlock:
4419
a644b0ab 4420 folio_unlock(folio);
a644b0ab 4421 folio_put(folio);
8710f6ed
DH
4422
4423 cond_resched();
4424 continue;
4425next:
4426 folio_walk_end(&fw, vma);
fa6c0231
ZY
4427 cond_resched();
4428 }
4429 mmap_read_unlock(mm);
4430 mmput(mm);
4431
4432 pr_debug("%lu of %lu THP split\n", split, total);
4433
4434out:
4435 return ret;
49071d43 4436}
fa6c0231 4437
fbe37501 4438static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
4b94c18d
ZY
4439 pgoff_t off_end, unsigned int new_order,
4440 long in_folio_offset)
fbe37501
ZY
4441{
4442 struct filename *file;
4443 struct file *candidate;
4444 struct address_space *mapping;
4445 int ret = -EINVAL;
4446 pgoff_t index;
4447 int nr_pages = 1;
4448 unsigned long total = 0, split = 0;
e220917f
LC
4449 unsigned int min_order;
4450 unsigned int target_order;
fbe37501
ZY
4451
4452 file = getname_kernel(file_path);
4453 if (IS_ERR(file))
4454 return ret;
4455
4456 candidate = file_open_name(file, O_RDONLY, 0);
4457 if (IS_ERR(candidate))
4458 goto out;
4459
4460 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
4461 file_path, off_start, off_end);
4462
4463 mapping = candidate->f_mapping;
e220917f
LC
4464 min_order = mapping_min_folio_order(mapping);
4465 target_order = max(new_order, min_order);
fbe37501
ZY
4466
4467 for (index = off_start; index < off_end; index += nr_pages) {
1fb130b2 4468 struct folio *folio = filemap_get_folio(mapping, index);
fbe37501
ZY
4469
4470 nr_pages = 1;
66dabbb6 4471 if (IS_ERR(folio))
fbe37501
ZY
4472 continue;
4473
9ee2c086 4474 if (!folio_test_large(folio))
fbe37501
ZY
4475 goto next;
4476
4477 total++;
9ee2c086 4478 nr_pages = folio_nr_pages(folio);
fbe37501 4479
e220917f 4480 if (target_order >= folio_order(folio))
2394aef6
ZY
4481 goto next;
4482
9ee2c086 4483 if (!folio_trylock(folio))
fbe37501
ZY
4484 goto next;
4485
e220917f
LC
4486 if (folio->mapping != mapping)
4487 goto unlock;
4488
4b94c18d
ZY
4489 if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
4490 if (!split_folio_to_order(folio, target_order))
4491 split++;
4492 } else {
4493 struct page *split_at = folio_page(folio,
4494 in_folio_offset);
4495 if (!folio_split(folio, target_order, split_at, NULL))
4496 split++;
4497 }
fbe37501 4498
e220917f 4499unlock:
9ee2c086 4500 folio_unlock(folio);
fbe37501 4501next:
9ee2c086 4502 folio_put(folio);
fbe37501
ZY
4503 cond_resched();
4504 }
4505
4506 filp_close(candidate, NULL);
4507 ret = 0;
4508
4509 pr_debug("%lu of %lu file-backed THP split\n", split, total);
4510out:
4511 putname(file);
4512 return ret;
4513}
4514
fa6c0231
ZY
4515#define MAX_INPUT_BUF_SZ 255
4516
4517static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4518 size_t count, loff_t *ppops)
4519{
4520 static DEFINE_MUTEX(split_debug_mutex);
4521 ssize_t ret;
fc4d1823
ZY
4522 /*
4523 * hold pid, start_vaddr, end_vaddr, new_order or
4524 * file_path, off_start, off_end, new_order
4525 */
fbe37501 4526 char input_buf[MAX_INPUT_BUF_SZ];
fa6c0231
ZY
4527 int pid;
4528 unsigned long vaddr_start, vaddr_end;
fc4d1823 4529 unsigned int new_order = 0;
4b94c18d 4530 long in_folio_offset = -1;
fa6c0231
ZY
4531
4532 ret = mutex_lock_interruptible(&split_debug_mutex);
4533 if (ret)
4534 return ret;
4535
4536 ret = -EFAULT;
4537
4538 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4539 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4540 goto out;
4541
4542 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
fbe37501
ZY
4543
4544 if (input_buf[0] == '/') {
4545 char *tok;
1fc10653 4546 char *tok_buf = input_buf;
fbe37501
ZY
4547 char file_path[MAX_INPUT_BUF_SZ];
4548 pgoff_t off_start = 0, off_end = 0;
4549 size_t input_len = strlen(input_buf);
4550
1fc10653
AM
4551 tok = strsep(&tok_buf, ",");
4552 if (tok && tok_buf) {
93c1e57a 4553 strscpy(file_path, tok);
fbe37501
ZY
4554 } else {
4555 ret = -EINVAL;
4556 goto out;
4557 }
4558
4b94c18d
ZY
4559 ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
4560 &new_order, &in_folio_offset);
4561 if (ret != 2 && ret != 3 && ret != 4) {
fbe37501
ZY
4562 ret = -EINVAL;
4563 goto out;
4564 }
4b94c18d
ZY
4565 ret = split_huge_pages_in_file(file_path, off_start, off_end,
4566 new_order, in_folio_offset);
fbe37501
ZY
4567 if (!ret)
4568 ret = input_len;
4569
4570 goto out;
4571 }
4572
4b94c18d
ZY
4573 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
4574 &vaddr_end, &new_order, &in_folio_offset);
fa6c0231
ZY
4575 if (ret == 1 && pid == 1) {
4576 split_huge_pages_all();
4577 ret = strlen(input_buf);
4578 goto out;
4b94c18d 4579 } else if (ret != 3 && ret != 4 && ret != 5) {
fa6c0231
ZY
4580 ret = -EINVAL;
4581 goto out;
4582 }
4583
4b94c18d
ZY
4584 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
4585 in_folio_offset);
fa6c0231
ZY
4586 if (!ret)
4587 ret = strlen(input_buf);
4588out:
4589 mutex_unlock(&split_debug_mutex);
4590 return ret;
4591
4592}
4593
4594static const struct file_operations split_huge_pages_fops = {
4595 .owner = THIS_MODULE,
4596 .write = split_huge_pages_write,
fa6c0231 4597};
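/*
 * Usage sketch for the debugfs interface defined above (illustrative;
 * the paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# split all THPs system-wide
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 *	# split THPs of pid 1234 in [0x7f0000000000, 0x7f0000200000) to order 0
 *	echo "1234,0x7f0000000000,0x7f0000200000,0" > /sys/kernel/debug/split_huge_pages
 *	# split file-backed THPs of /mnt/f in page offsets [0x100, 0x200)
 *	echo "/mnt/f,0x100,0x200,0" > /sys/kernel/debug/split_huge_pages
 *
 * The optional trailing field selects an in-folio offset, which drives
 * folio_split() instead of a uniform split.
 */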
49071d43
KS
4598
4599static int __init split_huge_pages_debugfs(void)
4600{
d9f7979c
GKH
4601 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4602 &split_huge_pages_fops);
49071d43
KS
4603 return 0;
4604}
4605late_initcall(split_huge_pages_debugfs);
4606#endif
616b8371
ZY
4607
4608#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
7f5abe60 4609int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
616b8371
ZY
4610 struct page *page)
4611{
a8e61d58 4612 struct folio *folio = page_folio(page);
616b8371
ZY
4613 struct vm_area_struct *vma = pvmw->vma;
4614 struct mm_struct *mm = vma->vm_mm;
4615 unsigned long address = pvmw->address;
6c287605 4616 bool anon_exclusive;
616b8371
ZY
4617 pmd_t pmdval;
4618 swp_entry_t entry;
ab6e3d09 4619 pmd_t pmdswp;
616b8371
ZY
4620
4621 if (!(pvmw->pmd && !pvmw->pte))
7f5abe60 4622 return 0;
616b8371 4623
616b8371 4624 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
8a8683ad 4625 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
6c287605 4626
e3b4b137 4627 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
a8e61d58 4628 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
e3b4b137 4629 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
6c287605 4630 set_pmd_at(mm, address, pvmw->pmd, pmdval);
7f5abe60 4631 return -EBUSY;
6c287605
DH
4632 }
4633
616b8371 4634 if (pmd_dirty(pmdval))
db44c658 4635 folio_mark_dirty(folio);
4dd845b5
AP
4636 if (pmd_write(pmdval))
4637 entry = make_writable_migration_entry(page_to_pfn(page));
6c287605
DH
4638 else if (anon_exclusive)
4639 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4dd845b5
AP
4640 else
4641 entry = make_readable_migration_entry(page_to_pfn(page));
2e346877
PX
4642 if (pmd_young(pmdval))
4643 entry = make_migration_entry_young(entry);
4644 if (pmd_dirty(pmdval))
4645 entry = make_migration_entry_dirty(entry);
ab6e3d09
NH
4646 pmdswp = swp_entry_to_pmd(entry);
4647 if (pmd_soft_dirty(pmdval))
4648 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
24bf08c4
DH
4649 if (pmd_uffd_wp(pmdval))
4650 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
ab6e3d09 4651 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
a8e61d58
DH
4652 folio_remove_rmap_pmd(folio, page, vma);
4653 folio_put(folio);
283fd6fe 4654 trace_set_migration_pmd(address, pmd_val(pmdswp));
7f5abe60
DH
4655
4656 return 0;
616b8371
ZY
4657}
4658
4659void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
4660{
14d85a6e 4661 struct folio *folio = page_folio(new);
616b8371
ZY
4662 struct vm_area_struct *vma = pvmw->vma;
4663 struct mm_struct *mm = vma->vm_mm;
4664 unsigned long address = pvmw->address;
4fba8f2a 4665 unsigned long haddr = address & HPAGE_PMD_MASK;
616b8371
ZY
4666 pmd_t pmde;
4667 swp_entry_t entry;
4668
4669 if (!(pvmw->pmd && !pvmw->pte))
4670 return;
4671
4672 entry = pmd_to_swp_entry(*pvmw->pmd);
14d85a6e 4673 folio_get(folio);
e3981db4 4674 pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
ab6e3d09
NH
4675 if (pmd_swp_soft_dirty(*pvmw->pmd))
4676 pmde = pmd_mksoft_dirty(pmde);
3c811f78 4677 if (is_writable_migration_entry(entry))
161e393c 4678 pmde = pmd_mkwrite(pmde, vma);
8f34f1ea 4679 if (pmd_swp_uffd_wp(*pvmw->pmd))
f1eb1bac 4680 pmde = pmd_mkuffd_wp(pmde);
2e346877
PX
4681 if (!is_migration_entry_young(entry))
4682 pmde = pmd_mkold(pmde);
4683	/* NOTE: this may entail setting soft-dirty on some archs */
14d85a6e 4684 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
2e346877 4685 pmde = pmd_mkdirty(pmde);
616b8371 4686
14d85a6e 4687 if (folio_test_anon(folio)) {
395db7b1 4688 rmap_t rmap_flags = RMAP_NONE;
6c287605
DH
4689
4690 if (!is_readable_migration_entry(entry))
4691 rmap_flags |= RMAP_EXCLUSIVE;
4692
395db7b1 4693 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
6c287605 4694 } else {
14d85a6e 4695 folio_add_file_rmap_pmd(folio, new, vma);
6c287605 4696 }
14d85a6e 4697 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
4fba8f2a 4698 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
5cbcf225
MS
4699
4700 /* No need to invalidate - it was non-present before */
616b8371 4701 update_mmu_cache_pmd(vma, address, pvmw->pmd);
283fd6fe 4702 trace_remove_migration_pmd(address, pmd_val(pmde));
616b8371
ZY
4703}
4704#endif