static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	/*
	 * The has_file_id() check must stay FIRST. Teardown paths clear
	 * fp->volatile_id and may call here with ft == NULL; the early
	 * return both keeps ft from being dereferenced and preserves the
	 * documented contract that the m_fp_list unlink is skipped when
	 * volatile_id is KSMBD_NO_FID (the teardown loop unlinks fp
	 * itself in that case).
	 */
	if (!has_file_id(fp->volatile_id))
		return;

	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}
-void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
-			 unsigned int state)
+/**
+ * ksmbd_update_fstate() - update an fp state under the file-table lock
+ * @ft: file table that publishes @fp's volatile id
+ * @fp: file pointer to update
+ * @state: new state
+ *
+ * Return: 0 on success. The FP_NEW -> FP_INITED transition is special:
+ * -ENOENT if teardown already unpublished @fp by advancing the state or
+ * clearing the volatile id. Other state updates preserve the historical
+ * fire-and-forget behavior.
+ */
+int ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+			unsigned int state)
 {
+	int ret;
+
 	if (!fp)
-		return;
+		return -ENOENT;
 	write_lock(&ft->lock);
-	fp->f_state = state;
+	/*
+	 * ft->lock orders this check against teardown, which clears the
+	 * volatile id / advances f_state under the same lock before it
+	 * decides who owns the opener's original reference.
+	 */
+	if (state == FP_INITED &&
+	    (fp->f_state != FP_NEW || !has_file_id(fp->volatile_id))) {
+		ret = -ENOENT;
+	} else {
+		fp->f_state = state;
+		ret = 0;
+	}
 	write_unlock(&ft->lock);
+
+	return ret;
+}
+
+/*
+ * ksmbd_mark_fp_closed() - transition @fp toward FP_CLOSED under ft->lock
+ * and report how many references the teardown path now owns.
+ *
+ * Returns 2 when this call itself performs the FP_INITED -> FP_CLOSED
+ * transition: teardown then owns both the idr-held reference and its
+ * transient lookup reference. Returns 1 in every other state: either an
+ * earlier ksmbd_close_fd() already consumed the idr-held reference
+ * (FP_CLOSED on entry), or fp is still FP_NEW and the in-flight
+ * opener/reopener keeps the original reference until
+ * ksmbd_update_fstate(..., FP_INITED) observes the cleared volatile id.
+ */
+static int ksmbd_mark_fp_closed(struct ksmbd_file *fp)
+{
+	if (fp->f_state != FP_INITED)
+		return 1;
+
+	set_close_state_blocked_works(fp);
+	fp->f_state = FP_CLOSED;
+	return 2;
+}
static int
struct ksmbd_tree_connect *tcon,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp,
- struct ksmbd_user *user))
+ struct ksmbd_user *user),
+ bool skip_preserves_fp)
{
struct ksmbd_file_table *ft = &sess->file_table;
struct ksmbd_file *fp;
int num = 0;
while (1) {
+ int n_to_drop;
+
write_lock(&ft->lock);
fp = idr_get_next(ft->idr, &id);
if (!fp) {
write_unlock(&ft->lock);
break;
}
-
- if (skip(tcon, fp, sess->user) ||
- !atomic_dec_and_test(&fp->refcount)) {
+ if (!atomic_inc_not_zero(&fp->refcount)) {
id++;
write_unlock(&ft->lock);
continue;
}
- set_close_state_blocked_works(fp);
- idr_remove(ft->idr, fp->volatile_id);
- fp->volatile_id = KSMBD_NO_FID;
- write_unlock(&ft->lock);
+ if (skip_preserves_fp) {
+ /*
+ * Session teardown: skip() is session_fd_check(),
+ * which may sleep and mutates fp->conn / fp->tcon /
+ * fp->volatile_id when it chooses to preserve fp
+ * for durable reconnect. Unpublish fp from the
+ * session idr here, under ft->lock, so that
+ * __ksmbd_lookup_fd() through this session cannot
+ * grant a new ksmbd_fp_get() reference to an fp
+ * whose fields are about to be rewritten outside
+ * the lock. Durable reconnect still reaches fp via
+ * global_ft.
+ */
+ idr_remove(ft->idr, id);
+ fp->volatile_id = KSMBD_NO_FID;
+ write_unlock(&ft->lock);
+
+ if (skip(tcon, fp, sess->user)) {
+ /*
+ * session_fd_check() has converted fp to
+ * durable-preserve state and cleared its
+ * per-conn fields. fp is already unpublished
+ * above; the original idr-owned ref keeps it
+ * alive for the durable scavenger. Drop only
+ * the transient ref. atomic_dec() is safe --
+ * atomic_inc_not_zero() succeeded on a
+ * positive value and we added one more, so
+ * refcount cannot be zero here.
+ */
+ atomic_dec(&fp->refcount);
+ id++;
+ continue;
+ }
+
+ /*
+ * Keep the close-state decision under the same lock
+ * observed by ksmbd_update_fstate(), which is how an
+ * in-flight FP_NEW opener learns that teardown has
+ * cleared its volatile id.
+ */
+ write_lock(&ft->lock);
+ n_to_drop = ksmbd_mark_fp_closed(fp);
+ write_unlock(&ft->lock);
+ } else {
+ /*
+ * Tree teardown: skip() is tree_conn_fd_check(), a
+ * cheap pointer compare that doesn't sleep and has
+ * no side effects, so keep the skip decision plus
+ * the unpublish-and-mark-closed sequence atomic
+ * under ft->lock. fps belonging to other tree
+ * connects (skip() == true) stay fully published in
+ * the session idr with no lock window.
+ */
+ if (skip(tcon, fp, sess->user)) {
+ atomic_dec(&fp->refcount);
+ write_unlock(&ft->lock);
+ id++;
+ continue;
+ }
+ idr_remove(ft->idr, id);
+ fp->volatile_id = KSMBD_NO_FID;
+ n_to_drop = ksmbd_mark_fp_closed(fp);
+ write_unlock(&ft->lock);
+ }
+ /*
+ * fp->volatile_id is already cleared to prevent stale idr
+ * removal from a deferred final close. Remove fp from
+ * m_fp_list here because __ksmbd_remove_fd() will skip the
+ * list unlink when volatile_id is KSMBD_NO_FID.
+ */
down_write(&fp->f_ci->m_lock);
list_del_init(&fp->node);
up_write(&fp->f_ci->m_lock);
- __ksmbd_close_fd(ft, fp);
-
- num++;
+ /*
+ * Drop the references this iteration owns:
+ *
+ * n_to_drop == 2: we observed FP_INITED and committed
+ * the FP_CLOSED transition ourselves, so we own the
+ * transient (+1) and the still-intact idr-owned ref.
+ *
+ * n_to_drop == 1: either a prior ksmbd_close_fd()
+ * already consumed the idr-owned ref, or fp was still
+ * FP_NEW and the in-flight opener/reopener must keep
+ * the original reference until ksmbd_update_fstate()
+ * observes the cleared volatile id.
+ *
+ * If we end up as the final putter, finalize fp and
+ * account the open_files_count decrement via the caller's
+ * atomic_sub(num, ...). Otherwise the remaining user's
+ * ksmbd_fd_put() reaches __put_fd_final(), which does its
+ * own atomic_dec(&open_files_count), so we must not count
+ * this fp here -- doing so would double-decrement the
+ * connection-wide counter.
+ */
+ if (atomic_sub_and_test(n_to_drop, &fp->refcount)) {
+ __ksmbd_close_fd(NULL, fp);
+ num++;
+ }
id++;
}
if (!is_reconnectable(fp))
return false;
+ if (fp->f_state != FP_INITED)
+ return false;
+
if (WARN_ON_ONCE(!fp->conn))
return false;
{
int num = __close_file_table_ids(work->sess,
work->tcon,
- tree_conn_fd_check);
+ tree_conn_fd_check,
+ false);
atomic_sub(num, &work->conn->stats.open_files_count);
}
{
int num = __close_file_table_ids(work->sess,
work->tcon,
- session_fd_check);
+ session_fd_check,
+ true);
atomic_sub(num, &work->conn->stats.open_files_count);
}
if (!ft->idr)
return;
- __close_file_table_ids(sess, NULL, session_fd_check);
+ __close_file_table_ids(sess, NULL, session_fd_check, true);
idr_destroy(ft->idr);
kfree(ft->idr);
ft->idr = NULL;