From: Linus Torvalds
Date: Wed, 24 Jul 2024 17:29:50 +0000 (-0700)
Subject: Merge tag 'random-6.11-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Tag: v6.11-rc1~55
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7a3fad30fd8b4b5e370906b3c554f64026f56c2f;p=thirdparty%2Fkernel%2Flinux.git

Merge tag 'random-6.11-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator updates from Jason Donenfeld:
 "This adds getrandom() support to the vDSO.

  First, it adds a new kind of mapping to mmap(2), MAP_DROPPABLE, which
  lets the kernel zero out pages anytime under memory pressure, which
  enables allocating memory that never gets swapped to disk but also
  doesn't count as being mlocked.

  Then, the vDSO implementation of getrandom() is introduced in a
  generic manner and hooked into random.c.

  Next, this is implemented on x86. (Also, though it's not ready for
  this pull, somebody has begun an arm64 implementation already)

  Finally, two vDSO selftests are added.

  There are also two housekeeping cleanup commits"

* tag 'random-6.11-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  MAINTAINERS: add random.h headers to RNG subsection
  random: note that RNDGETPOOL was removed in 2.6.9-rc2
  selftests/vDSO: add tests for vgetrandom
  x86: vdso: Wire up getrandom() vDSO implementation
  random: introduce generic vDSO getrandom() implementation
  mm: add MAP_DROPPABLE for designating always lazily freeable mappings
---

7a3fad30fd8b4b5e370906b3c554f64026f56c2f
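For background on the mm side of this pull, here is a minimal userspace sketch of how a MAP_DROPPABLE mapping might be created. It is illustrative only and not part of this merge; it assumes a 6.11+ kernel, and the fallback #define mirrors the value the 6.11 uapi header uses in case <sys/mman.h> does not expose the flag yet.

/*
 * Illustrative sketch (not from this commit): memory that the kernel may
 * drop (zero) under pressure instead of swapping it out, and that does not
 * count against the mlock limit.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_DROPPABLE
#define MAP_DROPPABLE 0x08	/* value used by the 6.11 uapi headers */
#endif

int main(void)
{
	size_t len = 1UL << 20;

	/* Anonymous, private, droppable mapping. */
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_ANONYMOUS | MAP_PRIVATE | MAP_DROPPABLE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_DROPPABLE)");	/* pre-6.11 kernels reject the flag */
		return 1;
	}

	memset(p, 0xaa, len);

	/*
	 * Under memory pressure the kernel may discard these pages at any
	 * time; they then read back as zeroes, so the contents must be
	 * regenerable (e.g. per-thread state for the vDSO getrandom()).
	 */
	printf("first byte: %#x\n", p[0]);

	munmap(p, len);
	return 0;
}

The combined diff below is the fallout of that flag in mm/: allocation failures for VM_DROPPABLE mappings stay quiet (__GFP_NOWARN), and such folios are never marked swap-backed.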
diff --cc mm/mempolicy.c
index 327a19b0883db,32291ab259609..b858e22b259d8
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@@ -2303,12 -2298,16 +2303,15 @@@ struct folio *vma_alloc_folio_noprof(gf
  {
  	struct mempolicy *pol;
  	pgoff_t ilx;
 -	struct page *page;
 +	struct folio *folio;
  
+ 	if (vma->vm_flags & VM_DROPPABLE)
+ 		gfp |= __GFP_NOWARN;
+ 
  	pol = get_vma_policy(vma, addr, order, &ilx);
 -	page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
 -			pol, ilx, numa_node_id());
 +	folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
  	mpol_cond_put(pol);
 -	return page_rmappable_folio(page);
 +	return folio;
  }
  EXPORT_SYMBOL(vma_alloc_folio_noprof);
  
diff --cc mm/rmap.c
index 8616308610b9f,1f9b5a9cb121c..2490e727e2dcb
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@@ -1394,27 -1384,26 +1394,31 @@@ void folio_add_anon_rmap_pmd(struct fol
   *
   * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
   * This means the inc-and-test can be bypassed.
 - * The folio does not have to be locked.
 + * The folio doesn't necessarily need to be locked while it's exclusive
 + * unless two threads map it concurrently. However, the folio must be
 + * locked if it's shared.
   *
 - * If the folio is pmd-mappable, it is accounted as a THP. As the folio
 - * is new, it's assumed to be mapped exclusively by a single process.
 + * If the folio is pmd-mappable, it is accounted as a THP.
   */
  void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 -		unsigned long address)
 +		unsigned long address, rmap_t flags)
  {
 -	int nr = folio_nr_pages(folio);
 +	const int nr = folio_nr_pages(folio);
 +	const bool exclusive = flags & RMAP_EXCLUSIVE;
 +	int nr_pmdmapped = 0;
  
  	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 +	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
  	VM_BUG_ON_VMA(address < vma->vm_start ||
  			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
 +
- 	if (!folio_test_swapbacked(folio))
+ 	/*
+ 	 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
+ 	 * under memory pressure.
+ 	 */
- 	if (!(vma->vm_flags & VM_DROPPABLE))
++	if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
  		__folio_set_swapbacked(folio);
 -	__folio_set_anon(folio, vma, address, true);
 +	__folio_set_anon(folio, vma, address, exclusive);
  
  	if (likely(!folio_test_large(folio))) {
  		/* increment count (starts at -1) */
@@@ -1858,8 -1862,15 +1868,13 @@@ static bool try_to_unmap_one(struct fol
  				 * discarded. Remap the page to page table.
  				 */
  				set_pte_at(mm, address, pvmw.pte, pteval);
- 				folio_set_swapbacked(folio);
+ 				/*
+ 				 * Unlike MADV_FREE mappings, VM_DROPPABLE ones
+ 				 * never get swap backed on failure to drop.
+ 				 */
+ 				if (!(vma->vm_flags & VM_DROPPABLE))
+ 					folio_set_swapbacked(folio);
 -				ret = false;
 -				page_vma_mapped_walk_done(&pvmw);
 -				break;
 +				goto walk_abort;
  			}
  
  			if (swap_duplicate(entry) < 0) {
diff --cc tools/testing/selftests/mm/Makefile
index e1aa09ddaa3da,e3e5740e13e15..901e0d07765b6
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@@ -75,7 -73,7 +75,8 @@@ TEST_GEN_FILES += ksm_functional_test
  TEST_GEN_FILES += mdwe_test
  TEST_GEN_FILES += hugetlb_fault_after_madv
  TEST_GEN_FILES += hugetlb_madv_vs_map
 +TEST_GEN_FILES += hugetlb_dio
+ TEST_GEN_FILES += droppable
  
  ifneq ($(ARCH),arm64)
  TEST_GEN_FILES += soft-dirty
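The Makefile hunk above wires a new droppable selftest into the mm selftests. As a rough sketch of the behaviour such a test exercises (simplified and hypothetical, not the actual tools/testing/selftests/mm/droppable.c), fill a MAP_DROPPABLE region, generate memory pressure, and wait for a page to read back as zero:

/* Hypothetical, simplified sketch of what a MAP_DROPPABLE test can check. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>

#ifndef MAP_DROPPABLE
#define MAP_DROPPABLE 0x08	/* 6.11 uapi value; verify against your headers */
#endif

int main(void)
{
	size_t size = 2UL * 1024 * 1024;
	unsigned char *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
				  MAP_ANONYMOUS | MAP_PRIVATE | MAP_DROPPABLE, -1, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(mem, 'A', size);	/* known pattern in the droppable region */

	for (int round = 0; round < 100; round++) {
		/* Create memory pressure in short-lived children. */
		pid_t pid = fork();
		if (pid == 0) {
			size_t hog = 128UL * 1024 * 1024;
			char *p = malloc(hog);
			if (p)
				memset(p, 'B', hog);	/* force real page allocation */
			_exit(0);
		}
		waitpid(pid, NULL, 0);

		/* A zero byte means the kernel reclaimed and zeroed that page. */
		for (size_t i = 0; i < size; i += 4096) {
			if (mem[i] == 0) {
				puts("droppable page was dropped and zeroed");
				return 0;
			}
		}
	}
	puts("no page was dropped (not enough memory pressure on this run)");
	return 1;
}

The key contract, also relied on by the vDSO getrandom() state, is that dropped pages come back as zeroes rather than faulting, so their contents must always be regenerable.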