return recv_res(rr_send(rr, *rlen), rr, rbuf, rlen);
}
-static void inode_acq(const struct f3_inode *inode)
+static pthread_mutex_t *inode_acq(const struct f3_inode *inode)
{
+	/* TODO: consider a hash function (e.g. hash64shift) here */
pthread_mutex_t *mtx = &mutexes[inode->vid & MUTEX_MASK].mtx;
int ret = pthread_mutex_lock(mtx);
assert(ret == 0);
+ return mtx;
}
-static void inode_rel(const struct f3_inode *inode)
+static void inode_rel(pthread_mutex_t *mtx)
{
- pthread_mutex_t *mtx = &mutexes[inode->vid & MUTEX_MASK].mtx;
int ret = pthread_mutex_unlock(mtx);
assert(ret == 0);
}
}
/* n.b. @fi is always NULL in current (3.10.x) libfuse */
if (!far.err) {
- inode_acq(inode);
+ pthread_mutex_t *mtx = inode_acq(inode);
merge_rw_inode(&far, inode);
- inode_rel(inode);
+ inode_rel(mtx);
}
far.err ? fuse_reply_err(req, far.err)
: fuse_reply_attr(req, &far.sb, f3.attr_timeout);
return err;
}
-static int upgrade_rw(fuse_req_t req, struct f3_inode *inode)
+static int
+upgrade_rw(fuse_req_t req, struct f3_inode *inode, pthread_mutex_t *mtx)
{
- /* inode must be locked */
+ assert(mtx);
if (inode->rw == F3_RDONLY) { /* this doesn't touch SQLite */
struct f3_req_res rr;
int err = ro_init(&rr, inode->fd);
if (!err)
err = replace_fd(inode, rr.sk[0]);
if (err) {
- inode_rel(inode);
+ inode_rel(mtx);
fuse_reply_err(req, err);
return err;
}
fprintf(rr.wfp, "%cctime=%"PRId64,
0, ts2ms(&sb->st_ctim));
- inode_acq(inode);
+ pthread_mutex_t *mtx = inode_acq(inode);
rr.send_fd = fl & FUSE_SET_ATTR_SIZE ? inode->fd : -1;
far.err = rr_do(&rr, &far, &rlen);
if (!far.err && rr.send_fd >= 0) {
far.err = replace_fd(inode, rr.sk[0]);
inode->rw = F3_DIRTY;
}
- inode_rel(inode);
+ inode_rel(mtx);
far.err ? fuse_reply_err(req, far.err)
: fuse_reply_attr(req, &far.sb, f3.attr_timeout);
struct f3_inode *cur_inode;
cur_inode = caa_container_of(cur, struct f3_inode, nd);
- inode_acq(cur_inode);
+ pthread_mutex_t *mtx = inode_acq(cur_inode);
if (cur_inode->refcount) {
cur_inode->refcount++;
if (fi)
to_free = inode;
inode = cur_inode;
}
- inode_rel(cur_inode);
+ inode_rel(mtx);
if (!to_free) /* existing entry was invalid, replace it */
(void)cds_lfht_add_replace(f3.vid2inode, hash, vid_eq,
inode, &inode->nd);
static int fsync_inode(struct f3_inode *inode)
{
int err = 0;
+ pthread_mutex_t *mtx = inode_acq(inode);
- inode_acq(inode);
if (inode->rw == F3_DIRTY) {
struct f3_req_res rr;
size_t rlen = sizeof(err);
err = replace_fd(inode, rr.sk[0]);
}
}
- inode_rel(inode);
+ inode_rel(mtx);
return err;
}
static uint64_t unref_inode(struct f3_inode *inode, uint64_t n)
{
- int64_t rc;
+ pthread_mutex_t *mtx = inode_acq(inode);
+ int rc = --inode->refcount;
- inode_acq(inode);
- rc = --inode->refcount;
- inode_rel(inode);
+ inode_rel(mtx);
if (rc) return rc;
rcu_read_lock();
fuse_reply_err(req, far.err);
} else {
uint64_t n;
+ pthread_mutex_t *mtx = inode_acq(inode);
- inode_acq(inode);
n = ++inode->refcount;
merge_rw_inode(&far, inode);
- inode_rel(inode);
+ inode_rel(mtx);
assert(n > 1);
e.attr = far.sb;
e.ino = (uintptr_t)inode;
ssize_t n;
int src_fd = (int)fi_in->fh;
int dst_fd = (int)fi_out->fh;
+ pthread_mutex_t *mtx = inode_acq(dst);
- inode_acq(dst);
- if (upgrade_rw(req, dst)) return;
+ if (upgrade_rw(req, dst, mtx)) return;
n = copy_file_range(src_fd, &off_in, dst_fd, &off_out, len, flags);
dst->rw = F3_DIRTY;
- inode_rel(dst);
+ inode_rel(mtx);
n < 0 ? fuse_reply_err(req, errno) : fuse_reply_write(req, (size_t)n);
}
 * We always use a read-only handle for non-empty files, upgrade_rw
* happens lazily on first write. Empty files are always R/W.
*/
- inode_acq(inode);
+ pthread_mutex_t *mtx = inode_acq(inode);
if (inode->fd >= 0) {
fi->fh = (uint64_t)inode->fd;
} else {
}
}
}
- inode_rel(inode);
+ inode_rel(mtx);
err ? fuse_reply_err(req, err) : fuse_reply_open(req, fi);
}
{
struct f3_inode *inode = f3_inode(ino);
int err = 0;
+ pthread_mutex_t *mtx = inode_acq(inode);
- inode_acq(inode);
if (inode->rw == F3_DIRTY) {
struct f3_req_res rr;
size_t rlen = sizeof(err);
err = replace_fd(inode, rr.sk[0]);
}
}
- inode_rel(inode);
+ inode_rel(mtx);
fuse_reply_err(req, err);
}
out_buf.buf[0].fd = (int)fi->fh;
out_buf.buf[0].pos = off;
- inode_acq(inode);
- if (upgrade_rw(req, inode))
+ pthread_mutex_t *mtx = inode_acq(inode);
+ if (upgrade_rw(req, inode, mtx))
return;
n = fuse_buf_copy(&out_buf, in_buf, FUSE_BUF_SPLICE_MOVE);
inode->rw = F3_DIRTY;
- inode_rel(inode);
+ inode_rel(mtx);
n < 0 ? fuse_reply_err(req, -n) : fuse_reply_write(req, (size_t)n);
}
{
struct f3_inode *inode = f3_inode(ino);
int err = 0;
+ pthread_mutex_t *mtx = inode_acq(inode);
- inode_acq(inode);
- if (upgrade_rw(req, inode))
+ if (upgrade_rw(req, inode, mtx))
return;
if (fallocate((int)fi->fh, mode, offset, length))
err = errno;
inode->rw = F3_DIRTY;
- inode_rel(inode);
+ inode_rel(mtx);
fuse_reply_err(req, err);
}