/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have a pre-allocated hmm struct to reuse.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}

static int hmm_invalidate_range(struct hmm *hmm, bool device,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
		addr = max(update->start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	if (!device)
		return 0;

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      bool blockable)
{
	struct hmm_update update;
	struct hmm *hmm = mm->hmm;

	update.start = start;
	update.end = end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = blockable;
	return hmm_invalidate_range(hmm, true, &update);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm_update update;
	struct hmm *hmm = mm->hmm;

	update.start = start;
	update.end = end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, false, &update);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct. (An illustrative usage sketch follows this function.)
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	}
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
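
/*
 * Illustrative sketch, not part of this file: how a driver might wire up a
 * mirror. The driver_* names and the surrounding driver structure are
 * hypothetical; only hmm_mirror_register(), hmm_mirror_unregister() and the
 * hmm_mirror_ops callbacks used above come from the HMM API.
 *
 *   static int driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                           const struct hmm_update *update)
 *   {
 *       // Invalidate the device page table for [update->start, update->end).
 *       // If update->blockable is false this must not sleep; return -EAGAIN
 *       // instead so the notifier can retry.
 *       return driver_invalidate_device_ptes(mirror, update->start,
 *                                            update->end);
 *   }
 *
 *   static const struct hmm_mirror_ops driver_mirror_ops = {
 *       .sync_cpu_device_pagetables = driver_sync_cpu_device_pagetables,
 *   };
 *
 *   down_write(&mm->mmap_sem);
 *   my_data->mirror.ops = &driver_mirror_ops;
 *   ret = hmm_mirror_register(&my_data->mirror, mm);
 *   up_write(&mm->mmap_sem);
 */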

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See hmm_vma_range_done() for further
 * information. (An illustrative usage sketch follows this function.)
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
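
/*
 * Illustrative sketch, not part of this file: snapshotting a range and
 * feeding the result to a device page table. Everything prefixed driver_ is
 * hypothetical; the struct hmm_range fields used here should be checked
 * against include/linux/hmm.h, which defines the driver supplied flags,
 * values and pfn encoding tables.
 *
 *   struct hmm_range range;
 *
 *   range.vma = vma;
 *   range.start = start;             // page aligned, inside vma
 *   range.end = end;                 // page aligned, inside vma
 *   range.pfns = driver_pfns;        // one uint64_t per page in the range
 *   range.flags = driver_hmm_flags;
 *   range.values = driver_hmm_values;
 *
 *   down_read(&vma->vm_mm->mmap_sem);
 *   ret = hmm_vma_get_pfns(&range);
 *   if (!ret) {
 *       driver_page_table_lock();
 *       if (hmm_vma_range_done(&range))
 *           driver_update_device_ptes(range.pfns);
 *       driver_page_table_unlock();
 *   }
 *   up_read(&vma->vm_mm->mmap_sem);
 */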

/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_monitor_end()
 *     goto retry;
 *   case 0:
 *     break;
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem);
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table updates
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access, either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
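
/*
 * Illustrative sketch, not part of this file: decoding the pfn array filled
 * by hmm_vma_fault() or hmm_vma_get_pfns(). The encoding is driver defined
 * through the range's flags and values tables, so every driver_ name below is
 * hypothetical; only the HMM_PFN_* indexes come from include/linux/hmm.h.
 *
 *   for (i = 0; i < npages; i++) {
 *       uint64_t entry = range.pfns[i];
 *
 *       if (entry == range.values[HMM_PFN_ERROR])
 *           return -EFAULT;
 *       if (!(entry & range.flags[HMM_PFN_VALID]))
 *           continue;       // hole, nothing to map on the device
 *       writable = !!(entry & range.flags[HMM_PFN_WRITE]);
 *       driver_map_page(dev_pt, addr + i * PAGE_SIZE,
 *                       driver_pfn_from_entry(&range, entry), writable);
 *   }
 */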
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by hmm_devmem_ops struct. (An illustrative usage sketch follows this
 * function.)
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
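
/*
 * Illustrative sketch, not part of this file: how a driver might add device
 * private memory. The driver_* callbacks are placeholders for whatever the
 * driver implements; struct hmm_devmem_ops is the real callback table from
 * include/linux/hmm.h.
 *
 *   static const struct hmm_devmem_ops driver_devmem_ops = {
 *       .free  = driver_devmem_free,   // device page is being freed
 *       .fault = driver_devmem_fault,  // CPU touched a device page
 *   };
 *
 *   devmem = hmm_devmem_add(&driver_devmem_ops, &pdev->dev, dev_mem_size);
 *   if (IS_ERR(devmem))
 *       return PTR_ERR(devmem);
 *   // Device pages now span pfn_to_page(devmem->pfn_first) up to (but not
 *   // including) devmem->pfn_last, ready to back migrations to the device.
 */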

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
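
/*
 * Illustrative sketch, not part of this file: a driver managing several
 * boards behind one fake struct device using the hmm_device helpers. Only
 * hmm_device_new() and hmm_device_put() are real; the rest is hypothetical.
 *
 *   struct hmm_device *hdev;
 *
 *   hdev = hmm_device_new(driver_private_data);
 *   if (IS_ERR(hdev))
 *       return PTR_ERR(hdev);
 *   // Use &hdev->device as the owner device for hmm_devmem_add() and
 *   // friends, then drop the reference when the driver is torn down:
 *   hmm_device_put(hdev);
 */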

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */