--- /dev/null
+From stable+bounces-241811-greg=kroah.com@vger.kernel.org Wed Apr 29 06:01:09 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Apr 2026 00:00:27 -0400
+Subject: mm: prevent droppable mappings from being locked
+To: stable@vger.kernel.org
+Cc: Anthony Yznaga <anthony.yznaga@oracle.com>, David Hildenbrand <david@kernel.org>, Pedro Falcato <pfalcato@suse.de>, "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>, Jann Horn <jannh@google.com>, "Jason A. Donenfeld" <jason@zx2c4.com>, Liam Howlett <liam.howlett@oracle.com>, Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>, Shuah Khan <shuah@kernel.org>, Suren Baghdasaryan <surenb@google.com>, Vlastimil Babka <vbabka@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260429040027.3341979-1-sashal@kernel.org>
+
+From: Anthony Yznaga <anthony.yznaga@oracle.com>
+
+[ Upstream commit d239462787b072c78eb19fc1f155c3d411256282 ]
+
+Droppable mappings must not be lockable. A check for VMAs with
+VM_DROPPABLE set in mlock_fixup(), alongside the checks for other types
+of unlockable VMAs, ensures this when mlock()/mlock2() is called.
+
+For mlockall(MCL_FUTURE), the check for unlockable VMAs is different. In
+apply_mlockall_flags(), if the flags parameter has MCL_FUTURE set,
+VM_LOCKED is applied to the current task's default VMA flag field,
+mm->def_flags, and VM_LOCKONFAULT is additionally applied if MCL_ONFAULT
+is set. When these flags are set as defaults in this manner, they are
+cleared in __mmap_complete() for new mappings that do not support mlock.
+However, a check for VM_DROPPABLE is missing from __mmap_complete(),
+resulting in droppable mappings being created with VM_LOCKED set. To fix
+this and reduce the chance of similar bugs in the future, introduce and
+use vma_supports_mlock().
+
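+As an illustration (a minimal userspace sketch, not part of the upstream
+fix), the bug can be observed with roughly the following sequence;
+MAP_DROPPABLE may need a fallback define with older uapi headers:
+
+  #include <stdio.h>
+  #include <sys/mman.h>
+
+  #ifndef MAP_DROPPABLE
+  #define MAP_DROPPABLE 0x08	/* uapi value from <linux/mman.h> */
+  #endif
+
+  int main(void)
+  {
+  	/* Request VM_LOCKED as a default flag for future mappings. */
+  	if (mlockall(MCL_FUTURE))
+  		perror("mlockall");
+
+  	/* Droppable mappings must not inherit VM_LOCKED. */
+  	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+  		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_DROPPABLE, -1, 0);
+  	if (p == MAP_FAILED)
+  		perror("mmap");
+
+  	/* Pause so VmFlags in /proc/self/smaps can be inspected:
+  	 * before this fix the droppable VMA should show "lo"
+  	 * (VM_LOCKED) alongside "dp"; with the fix it should not. */
+  	getchar();
+  	return 0;
+  }
+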
+Link: https://lkml.kernel.org/r/20260310155821.17869-1-anthony.yznaga@oracle.com
+Fixes: 9651fcedf7b9 ("mm: add MAP_DROPPABLE for designating always lazily freeable mappings")
+Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
+Suggested-by: David Hildenbrand <david@kernel.org>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Pedro Falcato <pfalcato@suse.de>
+Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Tested-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Jason A. Donenfeld <jason@zx2c4.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ adapted change to `mm/mmap.c::__mmap_region()` instead of `mm/vma.c::__mmap_complete()` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hugetlb_inline.h | 4 ++--
+ mm/internal.h | 10 ++++++++++
+ mm/mlock.c | 10 ++++++----
+ mm/mmap.c | 4 +---
+ 4 files changed, 19 insertions(+), 9 deletions(-)
+
+--- a/include/linux/hugetlb_inline.h
++++ b/include/linux/hugetlb_inline.h
+@@ -6,14 +6,14 @@
+
+ #include <linux/mm.h>
+
+-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
++static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
+ {
+ return !!(vma->vm_flags & VM_HUGETLB);
+ }
+
+ #else
+
+-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
++static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
+ {
+ return false;
+ }
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -1015,6 +1015,16 @@ static inline struct file *maybe_unlock_
+ }
+ return fpin;
+ }
++
++static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
++{
++ if (vma->vm_flags & (VM_SPECIAL | VM_DROPPABLE))
++ return false;
++ if (vma_is_dax(vma) || is_vm_hugetlb_page(vma))
++ return false;
++ return vma != get_gate_vma(current->mm);
++}
++
+ #else /* !CONFIG_MMU */
+ static inline void unmap_mapping_folio(struct folio *folio) { }
+ static inline void mlock_new_folio(struct folio *folio) { }
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -472,10 +472,12 @@ static int mlock_fixup(struct vma_iterat
+ int ret = 0;
+ vm_flags_t oldflags = vma->vm_flags;
+
+- if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
+- is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
+- vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
+- /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
++ if (newflags == oldflags || vma_is_secretmem(vma) ||
++ !vma_supports_mlock(vma))
++ /*
++ * Don't set VM_LOCKED or VM_LOCKONFAULT and don't count.
++ * For secretmem, don't allow the memory to be unlocked.
++ */
+ goto out;
+
+ vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1547,9 +1547,7 @@ expanded:
+
+ vm_stat_account(mm, vm_flags, pglen);
+ if (vm_flags & VM_LOCKED) {
+- if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
+- is_vm_hugetlb_page(vma) ||
+- vma == get_gate_vma(current->mm))
++ if (!vma_supports_mlock(vma))
+ vm_flags_clear(vma, VM_LOCKED_MASK);
+ else
+ mm->locked_vm += pglen;