From: Matthew Wilcox (Oracle)
Date: Wed, 6 Jul 2022 18:51:00 +0000 (-0400)
Subject: XArray: Add calls to might_alloc()
X-Git-Tag: v5.10.241~353
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=1e9a17bce9c3c04afefb3335b3811431a253c59f;p=thirdparty%2Fkernel%2Fstable.git

XArray: Add calls to might_alloc()

[ Upstream commit 1dd685c414a7b9fdb3d23aca3aedae84f0b998ae ]

Catch bogus GFP flags deterministically, instead of occasionally when
we actually have to allocate memory.

Reported-by: Nikolay Borisov
Signed-off-by: Matthew Wilcox (Oracle)
Stable-dep-of: 99765233ab42 ("NFS: Fixup allocation flags for nfsiod's __GFP_NORETRY")
Signed-off-by: Sasha Levin
---

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 92c0160b33523..05c025c5c100d 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -15,6 +15,7 @@
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
+#include <linux/sched/mm.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -583,6 +584,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -609,6 +611,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_store(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -684,6 +687,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock(xa);
@@ -711,6 +715,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_bh(xa);
@@ -738,6 +743,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
 {
 	void *curr;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
 	xa_unlock_irq(xa);
@@ -767,6 +773,7 @@ static inline int __must_check xa_insert(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock(xa);
@@ -796,6 +803,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_bh(xa);
@@ -825,6 +833,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_insert(xa, index, entry, gfp);
 	xa_unlock_irq(xa);
@@ -854,6 +863,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock(xa);
@@ -883,6 +893,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_bh(xa);
@@ -912,6 +923,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_irq(xa);
@@ -945,6 +957,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock(xa);
@@ -978,6 +991,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_bh(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_bh(xa);
@@ -1011,6 +1025,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
 {
 	int err;
 
+	might_alloc(gfp);
 	xa_lock_irq(xa);
 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_irq(xa);
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
index c8d9f19c1f357..967294b8edcfc 100644
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,6 @@
 #ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
 #define _TOOLS_PERF_LINUX_SCHED_MM_H
 
+#define might_alloc(gfp) do { } while (0)
+
 #endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
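
For illustration only (this note and sketch are not part of the patch):
might_alloc() boils down to might_sleep_if(gfpflags_allow_blocking(gfp)),
so a sleeping GFP mask passed from atomic context now warns on every call,
not just on the rare calls that actually descend into the allocator.  A
minimal sketch of the bug class being caught; the xarray, lock, and
function names below are made up:

#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);	/* hypothetical xarray */
static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void example_bad_store(void)	/* hypothetical caller */
{
	spin_lock(&example_lock);	/* atomic context: sleeping is a bug */

	/*
	 * GFP_KERNEL may sleep, so it is bogus here (GFP_ATOMIC would be
	 * correct).  Before this patch the mistake was caught only when
	 * __xa_store() actually had to allocate a node; with
	 * might_alloc(gfp) a CONFIG_DEBUG_ATOMIC_SLEEP kernel warns on
	 * every call.
	 */
	xa_store_bh(&example_xa, 0, xa_mk_value(1), GFP_KERNEL);

	spin_unlock(&example_lock);
}

Note that xa_store_bh() takes the xarray's own lock internally; the outer
spin_lock() merely puts the caller in atomic context for the sake of the
example.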