mm: add functions to track page directory modifications
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5a323422d783d076c01b41b2a9a1f4bbd7d1a6a5..fda41eb7f1c8bcd2513a3943e2dbf546fa7be946 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -782,6 +782,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
+/*
+ * Mapcount of a compound page as a whole; it does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any of their tail sub-pages.
+ */
 static inline int compound_mapcount(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageCompound(page), page);
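For context beyond this hunk: compound_mapcount() counts only mappings of
the compound page as a whole (e.g. a PMD-level THP mapping), not PTE
mappings of individual sub-pages.  A minimal hypothetical caller, assuming
only what the new comment guarantees (the helper name is illustrative and
not part of this patch):

/* Illustrative helper (not in this patch): true if the compound page
 * containing @page is currently mapped as a whole at least once.
 */
static inline bool page_mapped_as_compound(struct page *page)
{
	/* compound_mapcount() may only be called on compound pages. */
	if (!PageCompound(page))
		return false;
	return compound_mapcount(page) > 0;
}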
@@ -801,10 +806,16 @@ static inline void page_mapcount_reset(struct page *page)
 
 int __page_mapcount(struct page *page);
 
+/*
+ * Mapcount of a 0-order page; when the page is a compound sub-page, this
+ * includes compound_mapcount().
+ *
+ * The result is undefined for pages which cannot be mapped into userspace,
+ * e.g. SLAB pages or pages of a special type; see page_has_type().  Such
+ * pages use this field in struct page differently.
+ */
 static inline int page_mapcount(struct page *page)
 {
-       VM_BUG_ON_PAGE(PageSlab(page), page);
-
        if (unlikely(PageCompound(page)))
                return __page_mapcount(page);
        return atomic_read(&page->_mapcount) + 1;
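To make the new comment concrete: for a sub-page of a PTE-mapped compound
page, page_mapcount() is the sub-page's own PTE mapcount plus
compound_mapcount() of the head page.  A sketch of that relation (the
helper is hypothetical and assumes it is only called on compound
sub-pages):

/* Hypothetical, for illustration: PTE mappings of @subpage alone,
 * excluding mappings of the compound page as a whole.
 */
static inline int subpage_pte_mapcount(struct page *subpage)
{
	VM_BUG_ON_PAGE(!PageCompound(subpage), subpage);
	return page_mapcount(subpage) - compound_mapcount(subpage);
}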
@@ -1702,6 +1713,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
+long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+                   struct page **pages, unsigned int gup_flags);
 
 int get_user_pages_fast(unsigned long start, int nr_pages,
                        unsigned int gup_flags, struct page **pages);
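The newly declared pin_user_pages_unlocked() mirrors
get_user_pages_unlocked() but takes FOLL_PIN-style pins, which must be
released with unpin_user_pages().  A condensed sketch of a caller, with
error handling abbreviated (pin_user_buffer() is an illustrative name,
not a kernel API; @start is assumed page-aligned):

/* Illustrative only: pin a user buffer for writing, use it, unpin it. */
static int pin_user_buffer(unsigned long start, unsigned long size,
			   struct page **pages)
{
	unsigned long nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	long pinned;

	pinned = pin_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
	if (pinned < 0)
		return pinned;			/* gup error, e.g. -EFAULT */
	if (pinned != nr_pages) {
		unpin_user_pages(pages, pinned);
		return -EFAULT;			/* partial pin: bail out */
	}

	/* ... access the pinned pages ... */

	unpin_user_pages(pages, pinned);
	return 0;
}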
@@ -2078,13 +2091,53 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
        return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
                NULL : pud_offset(p4d, address);
 }
+
+static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
+                                    unsigned long address,
+                                    pgtbl_mod_mask *mod_mask)
+{
+       if (unlikely(pgd_none(*pgd))) {
+               if (__p4d_alloc(mm, pgd, address))
+                       return NULL;
+               *mod_mask |= PGTBL_PGD_MODIFIED;
+       }
+
+       return p4d_offset(pgd, address);
+}
+
 #endif /* !__ARCH_HAS_5LEVEL_HACK */
 
+static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
+                                    unsigned long address,
+                                    pgtbl_mod_mask *mod_mask)
+{
+       if (unlikely(p4d_none(*p4d))) {
+               if (__pud_alloc(mm, p4d, address))
+                       return NULL;
+               *mod_mask |= PGTBL_P4D_MODIFIED;
+       }
+
+       return pud_offset(p4d, address);
+}
+
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
                NULL: pmd_offset(pud, address);
 }
+
+static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
+                                    unsigned long address,
+                                    pgtbl_mod_mask *mod_mask)
+{
+       if (unlikely(pud_none(*pud))) {
+               if (__pmd_alloc(mm, pud, address))
+                       return NULL;
+               *mod_mask |= PGTBL_PUD_MODIFIED;
+       }
+
+       return pmd_offset(pud, address);
+}
 #endif /* CONFIG_MMU */
 
 #if USE_SPLIT_PTE_PTLOCKS
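Taken together, the new *_alloc_track() helpers behave exactly like
p4d_alloc()/pud_alloc()/pmd_alloc(), but additionally OR a bit into
*mod_mask naming the level whose entry was written (one level above the
newly allocated table).  A condensed sketch of a walk using them (the
function and variable names are illustrative, not from this patch):

/* Illustrative walk: allocate intermediate levels for @addr and record
 * which upper-level entries were modified along the way.
 */
static int alloc_pmd_tracked(struct mm_struct *mm, unsigned long addr,
			     pmd_t **pmdp, pgtbl_mod_mask *mask)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	p4d = p4d_alloc_track(mm, pgd, addr, mask);  /* may set PGTBL_PGD_MODIFIED */
	if (!p4d)
		return -ENOMEM;
	pud = pud_alloc_track(mm, p4d, addr, mask);  /* may set PGTBL_P4D_MODIFIED */
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc_track(mm, pud, addr, mask);  /* may set PGTBL_PUD_MODIFIED */
	if (!pmd)
		return -ENOMEM;

	*pmdp = pmd;
	return 0;
}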
@@ -2200,6 +2254,11 @@ static inline void pgtable_pte_page_dtor(struct page *page)
        ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
                NULL: pte_offset_kernel(pmd, address))
 
+#define pte_alloc_kernel_track(pmd, address, mask)                     \
+       ((unlikely(pmd_none(*(pmd))) &&                                 \
+         (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
+               NULL: pte_offset_kernel(pmd, address))
+
 #if USE_SPLIT_PMD_PTLOCKS
 
 static struct page *pmd_to_page(pmd_t *pmd)
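pte_alloc_kernel_track() follows the same pattern at the PTE level: the
({ ... 0; }) statement expression sets PGTBL_PMD_MODIFIED only when
__pte_alloc_kernel() succeeds (returns 0), while itself yielding 0 so the
conditional falls through to pte_offset_kernel().  A sketch of the
intended use, assuming the companion ARCH_PAGE_TABLE_SYNC_MASK and
arch_sync_kernel_mappings() interface introduced by the same series (the
function name below is illustrative):

/* Illustrative only: map one kernel page and let the architecture sync
 * page tables if an upper level had to be newly populated.
 */
static int kernel_map_one_page(pmd_t *pmd, unsigned long addr, pte_t entry)
{
	pgtbl_mod_mask mask = 0;
	pte_t *pte = pte_alloc_kernel_track(pmd, addr, &mask);

	if (!pte)
		return -ENOMEM;
	set_pte_at(&init_mm, addr, pte, entry);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(addr, addr + PAGE_SIZE);
	return 0;
}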
@@ -2601,25 +2660,6 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);
 
-/* readahead.c */
-#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
-
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-                       pgoff_t offset, unsigned long nr_to_read);
-
-void page_cache_sync_readahead(struct address_space *mapping,
-                              struct file_ra_state *ra,
-                              struct file *filp,
-                              pgoff_t offset,
-                              unsigned long size);
-
-void page_cache_async_readahead(struct address_space *mapping,
-                               struct file_ra_state *ra,
-                               struct file *filp,
-                               struct page *pg,
-                               pgoff_t offset,
-                               unsigned long size);
-
 extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);