struct fuse_pqueue *fpq = &fud->pq;
LIST_HEAD(to_end);
unsigned int i;
+ bool last;
spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
fuse_dev_end_requests(&to_end);
+ spin_lock(&fc->lock);
+ list_del(&fud->entry);
/* Are we the last open device? */
- if (atomic_dec_and_test(&fc->dev_count)) {
+ last = list_empty(&fc->devices);
+ spin_unlock(&fc->lock);
+
+ if (last) {
WARN_ON(fc->iq.fasync != NULL);
fuse_abort_conn(fc);
}
- spin_lock(&fc->lock);
- list_del(&fud->entry);
- spin_unlock(&fc->lock);
fuse_conn_put(fc);
}
fuse_dev_put(fud);
return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}
-static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
-{
- struct fuse_dev *new_fud = fuse_file_to_fud(new);
-
- if (fuse_dev_fc_get(new_fud))
- return -EINVAL;
-
- fuse_dev_install(new_fud, fc);
- atomic_inc(&fc->dev_count);
-
- return 0;
-}
-
+/*
+ * Clone ioctl: bind this /dev/fuse fd to the connection already owned by
+ * the fd passed in via *argp.  The old fuse_device_clone() helper is
+ * inlined here by this change.
+ */
static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
{
int oldfd;
- struct fuse_dev *fud;
+ struct fuse_dev *fud, *new_fud;
if (get_user(oldfd, argp))
return -EFAULT;
+ /* NOTE(review): the line assigning 'fud' from oldfd is elided hunk
+ * context here -- confirm it precedes the IS_ERR() check. */
if (IS_ERR(fud))
return PTR_ERR(fud);
- guard(mutex)(&fuse_mutex);
- return fuse_device_clone(fud->fc, file);
+ new_fud = fuse_file_to_fud(file);
+ /* NOTE(review): fuse_mutex guard dropped -- presumably
+ * fuse_dev_fc_get()/fuse_dev_install() now serialize themselves;
+ * verify at their definitions. */
+ /* Presumably fails if new_fud is already bound to a connection --
+ * TODO confirm fuse_dev_fc_get() semantics. */
+ if (fuse_dev_fc_get(new_fud))
+ return -EINVAL;
+
+ fuse_dev_install(new_fud, fud->fc);
+
+ return 0;
}
static long fuse_dev_ioctl_backing_open(struct file *file,
spin_lock_init(&fc->bg_lock);
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
- atomic_set(&fc->dev_count, 1);
atomic_set(&fc->epoch, 1);
INIT_WORK(&fc->epoch_work, fuse_epoch_work);
init_waitqueue_head(&fc->blocked_waitq);