#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
+#include <linux/fs_struct.h>
#include <uapi/linux/io_uring.h>
u32 result;
u32 sequence;
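+ /* fs_struct pinned at submission for opcodes that may do path lookups */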
+ struct fs_struct *fs;
+
struct work_struct work;
};
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
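+ /* no fs pinned yet; submission takes a reference only when needed */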
+ req->fs = NULL;
return req;
out:
percpu_ref_put(&ctx->refs);
ret = -EINTR;
}
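+ /* drop the fs_struct pinned at submission; free it on the last put */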
+ if (req->fs) {
+ struct fs_struct *fs = req->fs;
+
+ spin_lock(&req->fs->lock);
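+ /* keep the local pointer only if we dropped the last reference */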
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ }
io_cqring_add_event(req->ctx, sqe->user_data, ret);
io_put_req(req);
return 0;
static void io_sq_wq_submit_work(struct work_struct *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct fs_struct *old_fs_struct = current->fs;
struct io_ring_ctx *ctx = req->ctx;
struct mm_struct *cur_mm = NULL;
struct async_list *async_list;
/* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
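+ /*
+ * Run this request with the fs_struct it pinned at submission;
+ * when a request didn't pin one, put the worker's own fs back.
+ */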
+ if (req->fs != current->fs && (req->fs || current->fs != old_fs_struct)) {
+ task_lock(current);
+ if (req->fs)
+ current->fs = req->fs;
+ else
+ current->fs = old_fs_struct;
+ task_unlock(current);
+ }
+
ret = 0;
if (io_sqe_needs_user(sqe) && !cur_mm) {
if (!mmget_not_zero(ctx->sqo_mm)) {
mmput(cur_mm);
}
revert_creds(old_cred);
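+ /* restore the worker's original fs before returning to the pool */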
+ if (old_fs_struct) {
+ task_lock(current);
+ current->fs = old_fs_struct;
+ task_unlock(current);
+ }
}
/*
req->user_data = s->sqe->user_data;
+#if defined(CONFIG_NET)
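+ /*
+ * sendmsg/recvmsg may do path lookups (e.g. AF_UNIX addresses)
+ * from the async worker, so pin the submitter's fs_struct now.
+ * If exec is currently replacing it, fail with -EAGAIN instead.
+ */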
+ switch (READ_ONCE(s->sqe->opcode)) {
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->fs = current->fs;
+ req->fs->users++;
+ }
+ spin_unlock(&current->fs->lock);
+ if (!req->fs) {
+ ret = -EAGAIN;
+ goto err_req;
+ }
+ }
+#endif
+
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but