From: Greg Kroah-Hartman
Date: Wed, 24 Mar 2021 17:25:46 +0000 (+0100)
Subject: 5.4-stable patches
X-Git-Tag: v5.10.26~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=74b71abae9c744acf447ae703469bb58e514c9a4;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	hugetlbfs-hugetlb_fault_mutex_hash-cleanup.patch
---

diff --git a/queue-5.4/hugetlbfs-hugetlb_fault_mutex_hash-cleanup.patch b/queue-5.4/hugetlbfs-hugetlb_fault_mutex_hash-cleanup.patch
new file mode 100644
index 00000000000..b05510d00d8
--- /dev/null
+++ b/queue-5.4/hugetlbfs-hugetlb_fault_mutex_hash-cleanup.patch
@@ -0,0 +1,127 @@
+From 552546366a30d88bd1d6f5efe848b2ab50fd57e5 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz
+Date: Sat, 30 Nov 2019 17:56:30 -0800
+Subject: hugetlbfs: hugetlb_fault_mutex_hash() cleanup
+
+From: Mike Kravetz
+
+commit 552546366a30d88bd1d6f5efe848b2ab50fd57e5 upstream.
+
+A new clang diagnostic (-Wsizeof-array-div) warns about the calculation
+to determine the number of u32's in an array of unsigned longs.
+Suppress warning by adding parentheses.
+
+While looking at the above issue, noticed that the 'address' parameter
+to hugetlb_fault_mutex_hash is no longer used.  So, remove it from the
+definition and all callers.
+
+No functional change.
+
+Link: http://lkml.kernel.org/r/20190919011847.18400-1-mike.kravetz@oracle.com
+Signed-off-by: Mike Kravetz
+Reported-by: Nathan Chancellor
+Reviewed-by: Nathan Chancellor
+Reviewed-by: Davidlohr Bueso
+Reviewed-by: Andrew Morton
+Cc: Nick Desaulniers
+Cc: Ilie Halip
+Cc: David Bolvansky
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/hugetlbfs/inode.c    |    4 ++--
+ include/linux/hugetlb.h |    2 +-
+ mm/hugetlb.c            |   10 +++++-----
+ mm/userfaultfd.c        |    2 +-
+ 4 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -440,7 +440,7 @@ static void remove_inode_hugepages(struc
+ 		u32 hash;
+
+ 		index = page->index;
+-		hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
++		hash = hugetlb_fault_mutex_hash(h, mapping, index);
+ 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ 		/*
+@@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct f
+ 		addr = index * hpage_size;
+
+ 		/* mutex taken here, fault path and hole punch */
+-		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
++		hash = hugetlb_fault_mutex_hash(h, mapping, index);
+ 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ 		/* See if already present in mapping to avoid alloc/free */
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -106,7 +106,7 @@ void free_huge_page(struct page *page);
+ void hugetlb_fix_reserve_counts(struct inode *inode);
+ extern struct mutex *hugetlb_fault_mutex_table;
+ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
+-				pgoff_t idx, unsigned long address);
++				pgoff_t idx);
+
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4020,7 +4020,7 @@ retry:
+ 			 * handling userfault. Reacquire after handling
+ 			 * fault to make calling code simpler.
+ 			 */
+-			hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
++			hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+ 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+ 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
+@@ -4148,7 +4148,7 @@ backout_unlocked:
+
+ #ifdef CONFIG_SMP
+ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
+-				pgoff_t idx, unsigned long address)
++				pgoff_t idx)
+ {
+ 	unsigned long key[2];
+ 	u32 hash;
+@@ -4156,7 +4156,7 @@ u32 hugetlb_fault_mutex_hash(struct hsta
+ 	key[0] = (unsigned long) mapping;
+ 	key[1] = idx;
+
+-	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
++	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
+
+ 	return hash & (num_fault_mutexes - 1);
+ }
+@@ -4166,7 +4166,7 @@ u32 hugetlb_fault_mutex_hash(struct hsta
+  * return 0 and avoid the hashing overhead.
+  */
+ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
+-				pgoff_t idx, unsigned long address)
++				pgoff_t idx)
+ {
+ 	return 0;
+ }
+@@ -4210,7 +4210,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
+ 	 * get spurious allocation failures if two CPUs race to instantiate
+ 	 * the same page in the page cache.
+ 	 */
+-	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
++	hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+ 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ 	entry = huge_ptep_get(ptep);
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -269,7 +269,7 @@ retry:
+ 		 */
+ 		idx = linear_page_index(dst_vma, dst_addr);
+ 		mapping = dst_vma->vm_file->f_mapping;
+-		hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
++		hash = hugetlb_fault_mutex_hash(h, mapping, idx);
+ 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ 		err = -ENOMEM;
diff --git a/queue-5.4/series b/queue-5.4/series
new file mode 100644
index 00000000000..efabdb32370
--- /dev/null
+++ b/queue-5.4/series
@@ -0,0 +1 @@
+hugetlbfs-hugetlb_fault_mutex_hash-cleanup.patch
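
For context on the first half of the fix: clang's -Wsizeof-array-div fires
when sizeof(array) is divided by sizeof(type) and 'type' is not the array's
element type, because that pattern usually means a botched element count.
Here the division is intentional (jhash2() takes the number of u32 words in
key[]), and wrapping the divisor in parentheses tells clang so.  A minimal
standalone sketch, not part of the patch; only key[] and the u32 typedef
mirror the kernel code, everything else is illustrative:

	/* Build with a recent clang, e.g.: clang -Wsizeof-array-div demo.c */
	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t u32;

	int main(void)
	{
		unsigned long key[2];

		/* Warns: u32 is not key[]'s element type (unsigned long),
		 * so clang suspects a wrong element-count computation. */
		unsigned n_warn  = sizeof(key) / sizeof(u32);

		/* Silent: the extra parentheses mark the division as a
		 * deliberate "how many u32 words fit in key[]" count. */
		unsigned n_quiet = sizeof(key) / (sizeof(u32));

		printf("%u %u\n", n_warn, n_quiet);	/* same value */
		return 0;
	}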
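
The function being cleaned up is an instance of a common lock-sharding
pattern: hash the (mapping, index) pair identifying the faulting page, then
mask the result into a power-of-two table of mutexes so concurrent faults
on different pages rarely contend on the same lock.  That is also why the
unused 'address' parameter could go: it never fed the hash.  A hedged
userspace analogue of the idea, with made-up names (fault_mutex_hash,
NUM_MUTEXES, mix32); the kernel itself uses jhash2() and sizes
num_fault_mutexes at boot:

	#include <pthread.h>
	#include <stdint.h>

	#define NUM_MUTEXES 64	/* power of two, so a mask replaces modulo */

	static pthread_mutex_t fault_mutex_table[NUM_MUTEXES];

	/* Toy stand-in for jhash2(): any reasonable 32-bit mixer works. */
	static uint32_t mix32(uint32_t x)
	{
		x ^= x >> 16; x *= 0x7feb352dU;
		x ^= x >> 15; x *= 0x846ca68bU;
		x ^= x >> 16;
		return x;
	}

	/* Mirrors the cleaned-up two-argument shape: only the mapping and
	 * the page index pick the mutex. */
	static uint32_t fault_mutex_hash(const void *mapping, uint64_t idx)
	{
		uint32_t h = mix32((uint32_t)(uintptr_t)mapping ^
				   (uint32_t)idx ^ (uint32_t)(idx >> 32));

		return h & (NUM_MUTEXES - 1);
	}

A caller would wrap its per-page critical section in
pthread_mutex_lock(&fault_mutex_table[fault_mutex_hash(mapping, idx)]),
mirroring the mutex_lock() calls visible in the hunks above.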