struct eventfd_ctx *ki_eventfd;
};
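+
+/*
+ * Per-ring inode state: @ctx points back to the owning kioctx, and
+ * @migrate_lock serializes ring-page migration against kioctx teardown,
+ * replacing the previous use of the address_space's
+ * i_private_data/i_private_lock for this purpose.
+ */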
+struct aio_inode_info {
+ struct inode vfs_inode;
+ spinlock_t migrate_lock;
+ struct kioctx *ctx;
+};
+
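+/* Map a VFS inode back to its containing aio_inode_info. */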
+static inline struct aio_inode_info *AIO_I(struct inode *inode)
+{
+ return container_of(inode, struct aio_inode_info, vfs_inode);
+}
+
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr; /* current system wide number of aio requests */
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
+static struct kmem_cache *aio_inode_cachep;
static struct vfsmount *aio_mnt;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
struct file *file;
struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
+
if (IS_ERR(inode))
return ERR_CAST(inode);
inode->i_mapping->a_ops = &aio_ctx_aops;
- inode->i_mapping->i_private_data = ctx;
+ AIO_I(inode)->ctx = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
O_RDWR, &aio_ring_fops);
if (IS_ERR(file))
iput(inode);
return file;
}
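+
+/*
+ * Allocate an aio inode from the dedicated cache; ->ctx is wired up
+ * later by aio_private_file().
+ */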
+static struct inode *aio_alloc_inode(struct super_block *sb)
+{
+ struct aio_inode_info *ai;
+
+ ai = alloc_inode_sb(sb, aio_inode_cachep, GFP_KERNEL);
+ if (!ai)
+ return NULL;
+ ai->ctx = NULL;
+
+ return &ai->vfs_inode;
+}
+
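+/*
+ * ->free_inode runs from the VFS's RCU-delayed inode freeing path, so
+ * simply returning the object to the slab is safe here.
+ */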
+static void aio_free_inode(struct inode *inode)
+{
+ kmem_cache_free(aio_inode_cachep, AIO_I(inode));
+}
+
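+/* Installed via pfc->ops in aio_init_fs_context() below. */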
+static const struct super_operations aio_super_operations = {
+ .alloc_inode = aio_alloc_inode,
+ .free_inode = aio_free_inode,
+ .statfs = simple_statfs,
+};
+
static int aio_init_fs_context(struct fs_context *fc)
{
- if (!init_pseudo(fc, AIO_RING_MAGIC))
+ struct pseudo_fs_context *pfc;
+
+ pfc = init_pseudo(fc, AIO_RING_MAGIC);
+ if (!pfc)
return -ENOMEM;
fc->s_iflags |= SB_I_NOEXEC;
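+ /* Route inode allocation through the aio slab instead of the pseudo-fs defaults. */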
+ pfc->ops = &aio_super_operations;
return 0;
}
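+
+/*
+ * Slab constructor: runs once when each object's backing page is first
+ * set up, so the embedded inode and migrate_lock remain initialized
+ * across repeated alloc/free cycles of the same object.
+ */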
+static void init_once(void *obj)
+{
+ struct aio_inode_info *ai = obj;
+
+ inode_init_once(&ai->vfs_inode);
+ spin_lock_init(&ai->migrate_lock);
+}
+
/* aio_setup
* Creates the slab caches used by the aio routines, panic on
* failure as this is done early during the boot sequence.
*/
static int __init aio_setup(void)
{
static struct file_system_type aio_fs = {
.name = "aio",
.init_fs_context = aio_init_fs_context,
.kill_sb = kill_anon_super,
};
+
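+ /* Backing cache for aio_alloc_inode(); objects are constructed by init_once(). */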
+ aio_inode_cachep = kmem_cache_create("aio_inode_cache",
+ sizeof(struct aio_inode_info), 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT),
+ init_once);
+
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
static void put_aio_ring_file(struct kioctx *ctx)
{
struct file *aio_ring_file = ctx->aio_ring_file;
- struct address_space *i_mapping;
if (aio_ring_file) {
- truncate_setsize(file_inode(aio_ring_file), 0);
+ struct inode *inode = file_inode(aio_ring_file);
+ struct aio_inode_info *ai = AIO_I(inode);
+
+ truncate_setsize(inode, 0);
/* Prevent further access to the kioctx from migratepages */
- i_mapping = aio_ring_file->f_mapping;
- spin_lock(&i_mapping->i_private_lock);
- i_mapping->i_private_data = NULL;
+ spin_lock(&ai->migrate_lock);
+ ai->ctx = NULL;
ctx->aio_ring_file = NULL;
- spin_unlock(&ai->migrate_lock);
fput(aio_ring_file);
}
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode)
{
struct kioctx *ctx;
+ struct aio_inode_info *ai = AIO_I(mapping->host);
unsigned long flags;
pgoff_t idx;
int rc = 0;
- /* mapping->i_private_lock here protects against the kioctx teardown. */
- spin_lock(&mapping->i_private_lock);
- ctx = mapping->i_private_data;
+ /* ai->migrate_lock here protects against the kioctx teardown. */
+ spin_lock(&ai->migrate_lock);
+ ctx = ai->ctx;
if (!ctx) {
rc = -EINVAL;
goto out;
out_unlock:
mutex_unlock(&ctx->ring_lock);
out:
- spin_unlock(&mapping->i_private_lock);
+ spin_unlock(&ai->migrate_lock);
return rc;
}
#else