Opening and closing an inode dirties the ->i_readcount field.
Depending on the alignment of the inode, it may happen to false-share
with other fields loaded for both operations, to varying extents.
This notably concerns the ->i_flctx field.
Since most inodes never have the field populated, its presence can
instead be tracked with a flag in ->i_opflags, which sidesteps the
problem.
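
For illustration only (not part of the patch): this is the usual
release/acquire publication idiom. Below is a minimal userspace sketch
with C11 atomics standing in for smp_store_release()/smp_load_acquire()
and READ_ONCE()/WRITE_ONCE(); all names in it are made up.

	#include <stdatomic.h>
	#include <stddef.h>

	#define FAKE_IOP_FLCTX 0x0100

	struct fake_ctx;

	struct fake_inode {
		_Atomic unsigned short opflags;		/* stands in for ->i_opflags */
		_Atomic(struct fake_ctx *) flctx;	/* stands in for ->i_flctx */
	};

	/*
	 * Writer (serialized by ->i_lock in the real code): set the pointer
	 * first, then publish the flag with release semantics.
	 */
	static void publish_ctx(struct fake_inode *inode, struct fake_ctx *ctx)
	{
		atomic_store_explicit(&inode->flctx, ctx, memory_order_relaxed);
		atomic_fetch_or_explicit(&inode->opflags, FAKE_IOP_FLCTX,
					 memory_order_release);
	}

	/*
	 * Reader: an acquire load of the flag guarantees the pointer store is
	 * visible; the common case returns without touching ->flctx at all.
	 */
	static struct fake_ctx *lookup_ctx(struct fake_inode *inode)
	{
		if (!(atomic_load_explicit(&inode->opflags, memory_order_acquire) &
		      FAKE_IOP_FLCTX))
			return NULL;
		return atomic_load_explicit(&inode->flctx, memory_order_relaxed);
	}
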
Here are results I obtained while opening a file read-only in a loop
with 24 cores doing the work on Sapphire Rapids. Use of the flag, as
opposed to reading the ->i_flctx field directly, was toggled at runtime
while the benchmark was running, so that both results come from the
same inode alignment.
before: 3233740  after: 3373346 (+4%)
before: 3284313  after: 3518711 (+7%)
before: 3505545  after: 4092806 (+16%)
Or to put it differently, this varies wildly depending on how (un)lucky
you get.
The primary bottleneck before and after is the avoidable lockref trip in
do_dentry_open().
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20251203094837.290654-2-mjguzik@gmail.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
{
struct file_lock_context *ctx;
- /* paired with cmpxchg() below */
ctx = locks_inode_context(inode);
if (likely(ctx) || type == F_UNLCK)
goto out;
* Assign the pointer if it's not already assigned. If it is, then
* free the context we just allocated.
*/
- if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_opflags & IOP_FLCTX)) {
+ VFS_BUG_ON_INODE(inode->i_flctx, inode);
+ WRITE_ONCE(inode->i_flctx, ctx);
+ /*
+ * Paired with locks_inode_context().
+ */
+ smp_store_release(&inode->i_opflags, inode->i_opflags | IOP_FLCTX);
+ spin_unlock(&inode->i_lock);
+ } else {
+ VFS_BUG_ON_INODE(!inode->i_flctx, inode);
+ spin_unlock(&inode->i_lock);
kmem_cache_free(flctx_cache, ctx);
ctx = locks_inode_context(inode);
}
locks_inode_context(const struct inode *inode)
{
/*
- * Paired with the fence in locks_get_lock_context().
+ * Paired with smp_store_release in locks_get_lock_context().
+ *
+ * Ensures ->i_flctx will be visible if we spotted the flag.
*/
+ if (likely(!(smp_load_acquire(&inode->i_opflags) & IOP_FLCTX)))
+ return NULL;
return READ_ONCE(inode->i_flctx);
}
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
* could end up racing with tasks trying to set a new lease on this
* file.
*/
- flctx = READ_ONCE(inode->i_flctx);
+ flctx = locks_inode_context(inode);
if (!flctx)
return 0;
smp_mb();
static inline int break_layout(struct inode *inode, bool wait)
{
+ struct file_lock_context *flctx;
+
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ flctx = locks_inode_context(inode);
+ if (flctx && !list_empty_careful(&flctx->flc_lease)) {
unsigned int flags = LEASE_BREAK_LAYOUT;
if (!wait)
#define IOP_MGTIME 0x0020
#define IOP_CACHED_LINK 0x0040
#define IOP_FASTPERM_MAY_EXEC 0x0080
+#define IOP_FLCTX 0x0100
/*
* Inode state bits. Protected by inode->i_lock