vfs_ceph_new: add smbprofile for async-ops
Author:     Shachar Sharon <ssharon@redhat.com>
AuthorDate: Tue, 14 Jan 2025 09:46:05 +0000 (11:46 +0200)
Commit:     Jule Anger <janger@samba.org>
CommitDate: Mon, 17 Feb 2025 16:09:09 +0000 (16:09 +0000)
Commit fcd3fc34b2ec5e ("vfs_ceph_new: add profiling support") added
PROFILE accounting for the non-async VFS hooks. Also add SMBPROFILE
accounting for the async (read/write/fsync) hooks.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=15703

Signed-off-by: Shachar Sharon <ssharon@redhat.com>
Reviewed-by: Anoop C S <anoopcs@samba.org>
Reviewed-by: Guenther Deschner <gd@samba.org>
Autobuild-User(master): Günther Deschner <gd@samba.org>
Autobuild-Date(master): Fri Jan 17 16:47:28 UTC 2025 on atb-devel-224

(cherry picked from commit 775e08ec7b6a32086266a0f7eba4a107869b1cf6)
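For illustration only (not part of the patch): a minimal, self-contained C
sketch of the bracketing pattern the patch applies in vfs_ceph_aio_start()
and vfs_ceph_aio_finish() around the synchronous libcephfs calls. The names
aio_state, aio_start, aio_finish and nsec_diff below are local stand-ins; in
the module the timestamps and duration come from PROFILE_TIMESTAMP() and
nsec_time_diff(), the busy/idle transitions from the SMBPROFILE_BYTES_ASYNC_*
macros (which compile away when profiling support is not built in), and the
wrapped call is one of vfs_ceph_ll_read/_write/_fsync.

/*
 * Standalone sketch of the timing bracket: timestamp before and after a
 * synchronous call, record the nanosecond duration and, on failure, the
 * negative errno result.
 */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

struct aio_state {
	struct timespec start_time;
	struct timespec finish_time;
	int64_t duration_ns;	/* mirrors vfs_aio_state.duration */
	int error;		/* mirrors vfs_aio_state.error */
};

static int64_t nsec_diff(const struct timespec *end,
			 const struct timespec *begin)
{
	return (int64_t)(end->tv_sec - begin->tv_sec) * 1000000000 +
	       (int64_t)(end->tv_nsec - begin->tv_nsec);
}

static void aio_start(struct aio_state *state)
{
	/* In the patch: SMBPROFILE_BYTES_ASYNC_SET_BUSY() + PROFILE_TIMESTAMP() */
	clock_gettime(CLOCK_MONOTONIC, &state->start_time);
}

static void aio_finish(struct aio_state *state, ssize_t result)
{
	clock_gettime(CLOCK_MONOTONIC, &state->finish_time);
	state->duration_ns = nsec_diff(&state->finish_time, &state->start_time);
	if (result < 0) {
		state->error = (int)result;
	}
	/* In the patch: SMBPROFILE_BYTES_ASYNC_SET_IDLE() */
}

int main(void)
{
	struct aio_state st = { .error = 0 };
	char buf[4096];
	ssize_t ret;
	int fd;

	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0) {
		return 1;
	}

	aio_start(&st);
	/* Stand-in for the synchronous vfs_ceph_ll_read()/_write()/_fsync(). */
	ret = read(fd, buf, sizeof(buf));
	if (ret < 0) {
		ret = -errno;	/* the ceph wrappers return -errno on error */
	}
	aio_finish(&st, ret);
	close(fd);

	printf("result=%zd error=%d duration=%" PRId64 " ns\n",
	       ret, st.error, st.duration_ns);
	return 0;
}

Built with a plain "cc aio_timing.c -o aio_timing" and run, the sketch prints
the measured duration of a single read, the same duration/error pair that the
patch copies back into vfs_aio_state in the _recv hooks.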

diff --git a/source3/modules/vfs_ceph_new.c b/source3/modules/vfs_ceph_new.c
index 4ce02fc031c7c2c302528bbec4ff628d2899c842..ee0a44476239342ee02c1ac704795b799d4df5ea 100644
--- a/source3/modules/vfs_ceph_new.c
+++ b/source3/modules/vfs_ceph_new.c
@@ -2259,11 +2259,36 @@ out:
        return lstatus_code(result);
 }
 
+struct vfs_ceph_aio_state {
+       struct timespec start_time;
+       struct timespec finish_time;
+       struct vfs_aio_state vfs_aio_state;
+       SMBPROFILE_BYTES_ASYNC_STATE(profile_bytes);
+};
+
 struct vfs_ceph_pread_state {
        ssize_t bytes_read;
-       struct vfs_aio_state vfs_aio_state;
+       struct vfs_ceph_aio_state ceph_aio_state;
 };
 
+static void vfs_ceph_aio_start(struct vfs_ceph_aio_state *state)
+{
+       SMBPROFILE_BYTES_ASYNC_SET_BUSY(state->profile_bytes);
+       PROFILE_TIMESTAMP(&state->start_time);
+}
+
+static void vfs_ceph_aio_finish(struct vfs_ceph_aio_state *state,
+                               ssize_t result)
+{
+       PROFILE_TIMESTAMP(&state->finish_time);
+       state->vfs_aio_state.duration = nsec_time_diff(&state->finish_time,
+                                                      &state->start_time);
+       if (result < 0) {
+               state->vfs_aio_state.error = (int)result;
+       }
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->profile_bytes);
+}
+
 /*
  * Fake up an async ceph read by calling the synchronous API.
  */
@@ -2296,7 +2321,15 @@ static struct tevent_req *vfs_ceph_pread_send(struct vfs_handle_struct *handle,
                return tevent_req_post(req, ev);
        }
 
+       SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pread,
+                                    profile_p,
+                                    state->ceph_aio_state.profile_bytes,
+                                    n);
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ceph_aio_state.profile_bytes);
+
+       vfs_ceph_aio_start(&state->ceph_aio_state);
        ret = vfs_ceph_ll_read(handle, cfh, offset, n, data);
+       vfs_ceph_aio_finish(&state->ceph_aio_state, ret);
        if (ret < 0) {
                /* ceph returns -errno on error. */
                tevent_req_error(req, -ret);
@@ -2316,10 +2349,13 @@ static ssize_t vfs_ceph_pread_recv(struct tevent_req *req,
                tevent_req_data(req, struct vfs_ceph_pread_state);
 
        DBG_DEBUG("[CEPH] pread_recv: bytes_read=%zd\n", state->bytes_read);
+
+       SMBPROFILE_BYTES_ASYNC_END(state->ceph_aio_state.profile_bytes);
+
        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                return -1;
        }
-       *vfs_aio_state = state->vfs_aio_state;
+       *vfs_aio_state = state->ceph_aio_state.vfs_aio_state;
        return state->bytes_read;
 }
 
@@ -2353,7 +2389,7 @@ out:
 
 struct vfs_ceph_pwrite_state {
        ssize_t bytes_written;
-       struct vfs_aio_state vfs_aio_state;
+       struct vfs_ceph_aio_state ceph_aio_state;
 };
 
 /*
@@ -2388,7 +2424,15 @@ static struct tevent_req *vfs_ceph_pwrite_send(struct vfs_handle_struct *handle,
                return tevent_req_post(req, ev);
        }
 
+       SMBPROFILE_BYTES_ASYNC_START(syscall_asys_pwrite,
+                                    profile_p,
+                                    state->ceph_aio_state.profile_bytes,
+                                    n);
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ceph_aio_state.profile_bytes);
+
+       vfs_ceph_aio_start(&state->ceph_aio_state);
        ret = vfs_ceph_ll_write(handle, cfh, offset, n, data);
+       vfs_ceph_aio_finish(&state->ceph_aio_state, ret);
        if (ret < 0) {
                /* ceph returns -errno on error. */
                tevent_req_error(req, -ret);
@@ -2409,10 +2453,13 @@ static ssize_t vfs_ceph_pwrite_recv(struct tevent_req *req,
 
        DBG_DEBUG("[CEPH] pwrite_recv: bytes_written=%zd\n",
                  state->bytes_written);
+
+       SMBPROFILE_BYTES_ASYNC_END(state->ceph_aio_state.profile_bytes);
+
        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                return -1;
        }
-       *vfs_aio_state = state->vfs_aio_state;
+       *vfs_aio_state = state->ceph_aio_state.vfs_aio_state;
        return state->bytes_written;
 }
 
@@ -2525,6 +2572,10 @@ out:
  * Fake up an async ceph fsync by calling the synchronous API.
  */
 
+struct vfs_ceph_fsync_state {
+       struct vfs_ceph_aio_state ceph_aio_state;
+};
+
 static struct tevent_req *vfs_ceph_fsync_send(struct vfs_handle_struct *handle,
                                        TALLOC_CTX *mem_ctx,
                                        struct tevent_context *ev,
@@ -2532,12 +2583,12 @@ static struct tevent_req *vfs_ceph_fsync_send(struct vfs_handle_struct *handle,
 {
        struct vfs_ceph_fh *cfh = NULL;
        struct tevent_req *req = NULL;
-       struct vfs_aio_state *state = NULL;
+       struct vfs_ceph_fsync_state *state = NULL;
        int ret = -1;
 
        DBG_DEBUG("[CEPH] fsync_send(%p, %p)\n", handle, fsp);
 
-       req = tevent_req_create(mem_ctx, &state, struct vfs_aio_state);
+       req = tevent_req_create(mem_ctx, &state, struct vfs_ceph_fsync_state);
        if (req == NULL) {
                return NULL;
        }
@@ -2548,8 +2599,16 @@ static struct tevent_req *vfs_ceph_fsync_send(struct vfs_handle_struct *handle,
                return tevent_req_post(req, ev);
        }
 
+       SMBPROFILE_BYTES_ASYNC_START(syscall_asys_fsync,
+                                    profile_p,
+                                    state->ceph_aio_state.profile_bytes,
+                                    0);
+       SMBPROFILE_BYTES_ASYNC_SET_IDLE(state->ceph_aio_state.profile_bytes);
+
        /* Make sync call. */
+       vfs_ceph_aio_start(&state->ceph_aio_state);
        ret = vfs_ceph_ll_fsync(handle, cfh, false);
+       vfs_ceph_aio_finish(&state->ceph_aio_state, ret);
        if (ret != 0) {
                /* ceph_fsync returns -errno on error. */
                tevent_req_error(req, -ret);
@@ -2565,17 +2624,19 @@ static struct tevent_req *vfs_ceph_fsync_send(struct vfs_handle_struct *handle,
 static int vfs_ceph_fsync_recv(struct tevent_req *req,
                                struct vfs_aio_state *vfs_aio_state)
 {
-       struct vfs_aio_state *state =
-               tevent_req_data(req, struct vfs_aio_state);
+       struct vfs_ceph_fsync_state *state = tevent_req_data(
+               req, struct vfs_ceph_fsync_state);
 
        DBG_DEBUG("[CEPH] fsync_recv: error=%d duration=%" PRIu64 "\n",
-                 state->error,
-                 state->duration);
+                 state->ceph_aio_state.vfs_aio_state.error,
+                 state->ceph_aio_state.vfs_aio_state.duration);
+
+       SMBPROFILE_BYTES_ASYNC_END(state->ceph_aio_state.profile_bytes);
 
        if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
                return -1;
        }
-       *vfs_aio_state = *state;
+       *vfs_aio_state = state->ceph_aio_state.vfs_aio_state;
        return 0;
 }