* file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
* it. Same to PFNMAPs where there's neither page* nor pagecache.
*/
-#define THP_ORDERS_ALL_SPECIAL \
+#define THP_ORDERS_ALL_SPECIAL_DAX \
(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
* Mask of all large folio orders supported for THP.
*/
#define THP_ORDERS_ALL \
- (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
enum tva_type {
TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
const void __user *usr_src,
bool allow_pagefault);
-/**
- * vma_is_special_huge - Are transhuge page-table entries considered special?
- * @vma: Pointer to the struct vm_area_struct to consider
- *
- * Whether transhuge page-table entries are considered "special" following
- * the definition in vm_normal_page().
- *
- * Return: true if transhuge page-table entries should be considered special,
- * false otherwise.
- */
-static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
-{
- return vma_is_dax(vma) || (vma->vm_file &&
- (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
-}
-
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if MAX_NUMNODES > 1
return !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
+/*
+ * Returns true when the VMA's huge mappings are "special" - i.e. we are
+ * unable to access any folios backing them (PFNMAP/MIXEDMAP mappings).
+ *
+ * Note: unlike the previous incarnation of this helper, DAX VMAs are
+ * deliberately excluded here; callers that care about DAX check
+ * vma_is_dax() explicitly.
+ */
+static bool vma_is_special_huge(const struct vm_area_struct *vma)
+{
+	/* DAX has accessible folios, so it is not "special" in this sense. */
+	if (vma_is_dax(vma))
+		return false;
+	return vma_test_any(vma, VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT);
+}
+
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
vm_flags_t vm_flags,
enum tva_type type,
/* Check the intersection of requested and supported orders. */
if (vma_is_anonymous(vma))
supported_orders = THP_ORDERS_ALL_ANON;
- else if (vma_is_special_huge(vma))
- supported_orders = THP_ORDERS_ALL_SPECIAL;
+ else if (vma_is_dax(vma) || vma_is_special_huge(vma))
+ supported_orders = THP_ORDERS_ALL_SPECIAL_DAX;
else
supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
tlb->fullmm);
arch_check_zapped_pmd(vma, orig_pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
- if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+ if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
arch_check_zapped_pud(vma, orig_pud);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
- if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+ if (vma_is_special_huge(vma)) {
spin_unlock(ptl);
/* No zero page support yet */
} else {
*/
if (arch_needs_pgtable_deposit())
zap_deposited_table(mm, pmd);
- if (!vma_is_dax(vma) && vma_is_special_huge(vma))
+ if (vma_is_special_huge(vma))
return;
if (unlikely(pmd_is_migration_entry(old_pmd))) {
const softleaf_t old_entry = softleaf_from_pmd(old_pmd);
static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
- return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
-	 is_vm_hugetlb_page(vma);
+	/*
+	 * vma_is_special_huge() no longer covers DAX, so DAX must be
+	 * checked explicitly to keep the old "not splittable" behaviour.
+	 */
+	if (vma_is_dax(vma))
+		return true;
+	if (vma_is_special_huge(vma))
+		return true;
+	if (vma_test(vma, VMA_IO_BIT))
+		return true;
+	if (is_vm_hugetlb_page(vma))
+		return true;
+
+	return false;
}
static int split_huge_pages_pid(int pid, unsigned long vaddr_start,