int folio_split_unmapped(struct folio *folio, unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
- enum split_type split_type, bool warns);
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+ enum split_type split_type);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
struct list_head *list);
static inline int try_folio_split_to_order(struct folio *folio,
struct page *page, unsigned int new_order)
{
- if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
return split_huge_page_to_order(&folio->page, new_order);
return folio_split(folio, new_order, page, NULL);
}
return 0;
}
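
Since folio_check_splittable() returns an errno where folio_split_supported() returned a bool, the condition above is inverted but equivalent: any non-zero return means a non-uniform split is refused, and the helper falls back to a uniform split to new_order. A minimal caller sketch, assuming only what the hunk above shows; the helper name below is hypothetical:

static int split_page_out(struct folio *folio, struct page *page)
{
	/* Caller must hold the folio lock, as folio_check_splittable() asserts. */
	return try_folio_split_to_order(folio, page, 0);
}
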
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
- enum split_type split_type, bool warns)
+/**
+ * folio_check_splittable() - check if a folio can be split to a given order
+ * @folio: folio to be split
+ * @new_order: the smallest order of the folios after the split (a
+ *             buddy-allocator-like split generates folios with orders from
+ *             @folio's order - 1 down to @new_order).
+ * @split_type: uniform or non-uniform split
+ *
+ * folio_check_splittable() checks if @folio can be split to @new_order using
+ * the @split_type method. The truncated-folio check must come first, as later
+ * checks may dereference @folio->mapping.
+ *
+ * Context: @folio must be locked.
+ *
+ * Return: 0 if @folio can be split to @new_order; -EBUSY if @folio is in a
+ *         transient state (truncated or under writeback); -EINVAL if the
+ *         requested split is not supported.
+ */
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+ enum split_type split_type)
{
+ VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this currently also refuses swapcache folios that have no
+	 * mapping (shmem, or to-be-anon folios).
+	 */
+ if (!folio->mapping && !folio_test_anon(folio))
+ return -EBUSY;
+
if (folio_test_anon(folio)) {
/* order-1 is not supported for anonymous THP. */
- VM_WARN_ONCE(warns && new_order == 1,
- "Cannot split to order-1 folio");
if (new_order == 1)
- return false;
+ return -EINVAL;
} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
!mapping_large_folio_support(folio->mapping)) {
			/*
			 * The mapping may still contain THPs created by
			 * khugepaged when CONFIG_READ_ONLY_THP_FOR_FS is
			 * enabled, but in that case the mapping does not
			 * actually support large folios properly.
			 */
- VM_WARN_ONCE(warns,
- "Cannot split file folio to non-0 order");
- return false;
+ return -EINVAL;
}
}
	/*
	 * A non-uniform split creates after-split folios with orders from
	 * folio_order(folio) - 1 down to new_order, so it is not suitable for
	 * a swapcache folio. Only a uniform split to order-0 can be used
	 * here.
	 */
-	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-			"Cannot split swapcache folio to non-0 order");
-		return false;
-	}
+	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) &&
+	    folio_test_swapcache(folio))
+		return -EINVAL;
- return true;
+ if (is_huge_zero_folio(folio))
+ return -EINVAL;
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+
+ return 0;
}
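
With errnos instead of a bool, callers can separate transient failures from permanent ones: -EBUSY (truncation race, writeback in flight) may clear on retry, while -EINVAL (order-1 anon, swapcache or unsupported file folio to non-0 order, huge zero folio) never will. A hedged sketch of that distinction; the helper name is hypothetical:

static bool folio_split_worth_retrying(struct folio *folio,
				       unsigned int new_order)
{
	/* Only -EBUSY marks a condition that can pass; -EINVAL is final. */
	return folio_check_splittable(folio, new_order,
				      SPLIT_TYPE_NON_UNIFORM) == -EBUSY;
}
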
static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
int remap_flags = 0;
int extra_pins, ret;
pgoff_t end = 0;
- bool is_hzp;
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
if (folio != page_folio(split_at) || folio != page_folio(lock_at))
return -EINVAL;
- /*
- * Folios that just got truncated cannot get split. Signal to the
- * caller that there was a race.
- *
- * TODO: this will also currently refuse shmem folios that are in the
- * swapcache.
- */
- if (!is_anon && !folio->mapping)
- return -EBUSY;
-
if (new_order >= old_order)
return -EINVAL;
- if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
- return -EINVAL;
-
- is_hzp = is_huge_zero_folio(folio);
- if (is_hzp) {
- pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
- return -EBUSY;
+ ret = folio_check_splittable(folio, new_order, split_type);
+ if (ret) {
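+		/* -EINVAL indicates a caller bug; -EBUSY is an expected race. */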
+ VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
+ return ret;
}
- if (folio_test_writeback(folio))
- return -EBUSY;
-
if (is_anon) {
/*
* The caller does not necessarily hold an mmap_lock that would