MODULE_PARM_DESC(inter_copy_offload_enable,
"Enable inter server to server copy offload. Default: false");
+static void cleanup_async_copy(struct nfsd4_copy *copy);
+
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
static int nfsd4_ssc_umount_timeout = 900000; /* default to 15 mins */
module_param(nfsd4_ssc_umount_timeout, int, 0644);
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
struct nfsd4_cb_offload *cbo =
container_of(cb, struct nfsd4_cb_offload, co_cb);
+ struct nfsd4_copy *copy =
+ container_of(cbo, struct nfsd4_copy, cp_cb_offload);
- kfree(cbo);
+ cleanup_async_copy(copy);
}
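
The release callback can now recover the owning nfsd4_copy directly because the nfsd4_cb_offload is embedded in it rather than allocated separately. As a minimal illustration of the pattern the hunk above relies on (plain userspace C with hypothetical struct names, not the patch's own code), container_of() converts a pointer to an embedded member back into a pointer to its containing structure with simple pointer arithmetic:

	/* Illustrative only: recovering an outer struct from an embedded member. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inner { int cookie; };
	struct outer { int id; struct inner in; };

	int main(void)
	{
		struct outer o = { .id = 42, .in = { .cookie = 7 } };
		struct inner *ip = &o.in;
		struct outer *op = container_of(ip, struct outer, in);

		printf("outer id = %d\n", op->id);	/* prints 42 */
		return 0;
	}

Because the member is embedded, the release path needs no separate allocation, lookup, or kfree(); freeing the copy frees the callback data with it.
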
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
{
- struct nfsd4_cb_offload *cbo;
-
- cbo = kzalloc(sizeof(*cbo), GFP_KERNEL);
- if (!cbo)
- return;
+ struct nfsd4_cb_offload *cbo = &copy->cp_cb_offload;
memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
}
do_callback:
+ /* The kthread exits forthwith. Ensure that a subsequent
+ * OFFLOAD_CANCEL won't try to kill it again. */
+ set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags);
+
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
- cleanup_async_copy(copy);
return 0;
}
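
Setting NFSD4_COPY_F_STOPPED before sending the callback matters because the cancel path only signals the copy kthread when it is the first to set that flag. Below is a hedged sketch of such a cancel helper; the helper name is illustrative, and the copy_task field is assumed from the surrounding nfsd4_copy definition rather than shown in this hunk:

	/* Illustrative only: an OFFLOAD_CANCEL handler avoids stopping a
	 * kthread that has already marked itself stopped, because only the
	 * first caller to set NFSD4_COPY_F_STOPPED issues kthread_stop().
	 */
	static void example_stop_copy(struct nfsd4_copy *copy)
	{
		if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags))
			kthread_stop(copy->copy_task);
	}

With the flag already set by the completing kthread, a later OFFLOAD_CANCEL falls through the test_and_set_bit() check and never signals a task that has exited.
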