#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
+
+/*
+ * Forward declarations for the family-wide doit hooks.  They must be
+ * visible before <linux/genl_magic_func.h> is included below, because the
+ * GENL_MAGIC_FAMILY_PRE_DOIT/POST_DOIT macros are consumed by that header
+ * when it generates the genl family/ops definitions.
+ */
+static int drbd_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+static void drbd_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+
+#define GENL_MAGIC_FAMILY_PRE_DOIT drbd_pre_doit
+#define GENL_MAGIC_FAMILY_POST_DOIT drbd_post_doit
+
#include <linux/genl_magic_func.h>
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
return 0;
}
-/* This would be a good candidate for a "pre_doit" hook,
- * and per-family private info->pointers.
- * But we need to stay compatible with older kernels.
- * If it returns successfully, adm_ctx members are valid.
- *
+/* Flags for drbd_adm_prepare() */
+#define DRBD_ADM_NEED_MINOR (1 << 0)
+#define DRBD_ADM_NEED_RESOURCE (1 << 1)
+#define DRBD_ADM_NEED_CONNECTION (1 << 2)
+
+/* Per-command flags for drbd_pre_doit() */
+/*
+ * Maps each admin command to the context object(s) drbd_adm_prepare() must
+ * look up before the handler runs (minor/device, resource, or connection).
+ * Commands without an entry here — and any cmd value beyond the table, see
+ * the ARRAY_SIZE() bounds check in drbd_pre_doit() — fall back to 0,
+ * meaning no context lookup is performed for them.
+ */
+static const unsigned int drbd_genl_cmd_flags[] = {
+ [DRBD_ADM_GET_STATUS] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_NEW_MINOR] = DRBD_ADM_NEED_RESOURCE,
+ [DRBD_ADM_DEL_MINOR] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_NEW_RESOURCE] = 0,
+ [DRBD_ADM_DEL_RESOURCE] = DRBD_ADM_NEED_RESOURCE,
+ [DRBD_ADM_RESOURCE_OPTS] = DRBD_ADM_NEED_RESOURCE,
+ [DRBD_ADM_CONNECT] = DRBD_ADM_NEED_RESOURCE,
+ [DRBD_ADM_CHG_NET_OPTS] = DRBD_ADM_NEED_CONNECTION,
+ [DRBD_ADM_DISCONNECT] = DRBD_ADM_NEED_CONNECTION,
+ [DRBD_ADM_ATTACH] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_CHG_DISK_OPTS] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_RESIZE] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_PRIMARY] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_SECONDARY] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_NEW_C_UUID] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_START_OV] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_DETACH] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_INVALIDATE] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_INVAL_PEER] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_PAUSE_SYNC] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_RESUME_SYNC] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_SUSPEND_IO] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_RESUME_IO] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_OUTDATE] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_GET_TIMEOUT_TYPE] = DRBD_ADM_NEED_MINOR,
+ [DRBD_ADM_DOWN] = DRBD_ADM_NEED_RESOURCE,
+};
+
+/*
* At this point, we still rely on the global genl_lock().
* If we want to avoid that, and allow "genl_family.parallel_ops", we may need
* to add additional synchronization against object destruction/modification.
*/
-#define DRBD_ADM_NEED_MINOR 1
-#define DRBD_ADM_NEED_RESOURCE 2
-#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
const u8 cmd = info->genlhdr->cmd;
int err;
- memset(adm_ctx, 0, sizeof(*adm_ctx));
-
/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
return -EPERM;
return err;
}
-static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
- struct genl_info *info, int retcode)
+/*
+ * Family-wide genl pre_doit hook: allocate the per-request
+ * drbd_config_context, resolve the objects the command needs (per
+ * drbd_genl_cmd_flags[]), and stash the context in info->user_ptr[0] for
+ * the handler.  drbd_post_doit() sends the reply and frees the context.
+ */
+static int drbd_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_config_context *adm_ctx;
+ u8 cmd = info->genlhdr->cmd;
+ unsigned int flags;
+ int err;
+
+ adm_ctx = kzalloc_obj(*adm_ctx);
+ if (!adm_ctx)
+ return -ENOMEM;
+
+ /* Commands beyond the table get no context lookup (flags = 0). */
+ flags = (cmd < ARRAY_SIZE(drbd_genl_cmd_flags))
+ ? drbd_genl_cmd_flags[cmd] : 0;
+
+ err = drbd_adm_prepare(adm_ctx, skb, info, flags);
+ if (err && !adm_ctx->reply_skb) {
+ /* Fatal error before reply_skb was allocated. */
+ kfree(adm_ctx);
+ return err;
+ }
+ /*
+ * A reply skb exists: record the error in the reply header and still
+ * return 0, so the handler runs, sees ret_code != NO_ERROR and bails
+ * out; post_doit then delivers the error reply to userspace.
+ * NOTE(review): the success path relies on drbd_adm_prepare() setting
+ * reply_dh->ret_code to NO_ERROR itself — confirm in its body.
+ */
+ if (err)
+ adm_ctx->reply_dh->ret_code = err;
+
+ info->user_ptr[0] = adm_ctx;
+ return 0;
+}
+
+/*
+ * Family-wide genl post_doit hook: send the reply prepared by the handler
+ * (handlers communicate their result via adm_ctx->reply_dh->ret_code),
+ * drop the device reference taken by drbd_adm_prepare(), and free the
+ * context allocated in drbd_pre_doit().
+ */
+static void drbd_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
+
+ /* Defensive: genl only calls post_doit after a successful pre_doit. */
+ if (!adm_ctx)
+ return;
+
+ /*
+ * reply_skb may be NULL here: handlers such as drbd_adm_get_status()
+ * free it and clear the pointer on a nla_put failure path.
+ */
+ if (adm_ctx->reply_skb)
+ drbd_adm_send_reply(adm_ctx->reply_skb, info);
+
+ /*
+ * NOTE(review): only the device kref is dropped below.  If
+ * drbd_adm_prepare() takes connection/resource references for
+ * NEED_CONNECTION/NEED_RESOURCE commands (as upstream drbd_adm_finish
+ * does), those would leak here — confirm against drbd_adm_prepare().
+ */
if (adm_ctx->device) {
kref_put(&adm_ctx->device->kref, drbd_destroy_device);
adm_ctx->device = NULL;
adm_ctx->resource = NULL;
}
- if (!adm_ctx->reply_skb)
- return -ENOMEM;
-
- adm_ctx->reply_dh->ret_code = retcode;
- drbd_adm_send_reply(adm_ctx->reply_skb, info);
- return 0;
+ kfree(adm_ctx);
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct set_role_parms parms;
int err;
enum drbd_ret_code retcode;
enum drbd_state_rv rv;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
err = set_role_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
genl_unlock();
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
- rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
+ rv = drbd_set_role(adm_ctx->device, R_PRIMARY, parms.assume_uptodate);
else
- rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
+ rv = drbd_set_role(adm_ctx->device, R_SECONDARY, 0);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
genl_lock();
- drbd_adm_finish(&adm_ctx, info, rv);
+ adm_ctx->reply_dh->ret_code = rv;
return 0;
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct drbd_device *device;
struct disk_conf *new_disk_conf, *old_disk_conf;
int err;
unsigned int fifo_size;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- device = adm_ctx.device;
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ device = adm_ctx->device;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
/* we also need a disk
* to change the options on */
err = disk_conf_from_attrs_for_change(new_disk_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail_unlock;
}
if (err) {
/* Could be just "busy". Ignore?
* Introduce dedicated error code? */
- drbd_msg_put_info(adm_ctx.reply_skb,
+ drbd_msg_put_info(adm_ctx->reply_skb,
"Try again without changing current al-extents setting");
retcode = ERR_NOMEM;
goto fail_unlock;
success:
put_ldev(device);
out:
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_device *device;
struct drbd_peer_device *peer_device;
struct drbd_connection *connection;
enum drbd_state_rv rv;
struct net_conf *nc;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- device = adm_ctx.device;
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ device = adm_ctx->device;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
peer_device = first_peer_device(device);
connection = peer_device->connection;
conn_reconfig_start(connection);
err = disk_conf_from_attrs(new_disk_conf, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
} else {
drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
- drbd_msg_sprintf_info(adm_ctx.reply_skb,
+ drbd_msg_sprintf_info(adm_ctx->reply_skb,
"To-be-attached device has last effective > current size, and is consistent\n"
"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
retcode = ERR_IMPLICIT_SHRINK;
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device);
conn_reconfig_done(connection);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
- drbd_adm_finish(&adm_ctx, info, retcode);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
force_diskless_dec:
kfree(new_disk_conf);
lc_destroy(resync_lru);
kfree(new_plan);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
* Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct detach_parms parms = { };
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
err = detach_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
- retcode = adm_detach(adm_ctx.device, parms.force_detach);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ retcode = adm_detach(adm_ctx->device, parms.force_detach);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct drbd_connection *connection;
struct net_conf *old_net_conf, *new_net_conf = NULL;
int rsr; /* re-sync running */
struct crypto crypto = { };
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- connection = adm_ctx.connection;
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ connection = adm_ctx->connection;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
new_net_conf = kzalloc_obj(struct net_conf);
if (!new_net_conf) {
old_net_conf = connection->net_conf;
if (!old_net_conf) {
- drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
+ drbd_msg_put_info(adm_ctx->reply_skb, "net conf missing, try connect");
retcode = ERR_INVALID_REQUEST;
goto fail;
}
err = net_conf_from_attrs_for_change(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
done:
conn_reconfig_done(connection);
out:
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
struct connection_info connection_info;
enum drbd_notification_type flags;
unsigned int peer_devices = 0;
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_peer_device *peer_device;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct crypto crypto = { };
int i;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
-
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
- drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
+ if (!(adm_ctx->my_addr && adm_ctx->peer_addr)) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "connection endpoint(s) missing");
retcode = ERR_INVALID_REQUEST;
goto out;
}
* concurrent reconfiguration/addition/deletion */
for_each_resource(resource, &drbd_resources) {
for_each_connection(connection, resource) {
- if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
- !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
+ if (nla_len(adm_ctx->my_addr) == connection->my_addr_len &&
+ !memcmp(nla_data(adm_ctx->my_addr), &connection->my_addr,
connection->my_addr_len)) {
retcode = ERR_LOCAL_ADDR;
goto out;
}
- if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
- !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
+ if (nla_len(adm_ctx->peer_addr) == connection->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx->peer_addr), &connection->peer_addr,
connection->peer_addr_len)) {
retcode = ERR_PEER_ADDR;
goto out;
}
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
- connection = first_connection(adm_ctx.resource);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ connection = first_connection(adm_ctx->resource);
conn_reconfig_start(connection);
if (connection->cstate > C_STANDALONE) {
err = net_conf_from_attrs(new_net_conf, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
drbd_flush_workqueue(&connection->sender_work);
- mutex_lock(&adm_ctx.resource->conf_update);
+ mutex_lock(&adm_ctx->resource->conf_update);
old_net_conf = connection->net_conf;
if (old_net_conf) {
retcode = ERR_NET_CONFIGURED;
- mutex_unlock(&adm_ctx.resource->conf_update);
+ mutex_unlock(&adm_ctx->resource->conf_update);
goto fail;
}
rcu_assign_pointer(connection->net_conf, new_net_conf);
connection->csums_tfm = crypto.csums_tfm;
connection->verify_tfm = crypto.verify_tfm;
- connection->my_addr_len = nla_len(adm_ctx.my_addr);
- memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
- connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
- memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
+ connection->my_addr_len = nla_len(adm_ctx->my_addr);
+ memcpy(&connection->my_addr, nla_data(adm_ctx->my_addr), connection->my_addr_len);
+ connection->peer_addr_len = nla_len(adm_ctx->peer_addr);
+ memcpy(&connection->peer_addr, nla_data(adm_ctx->peer_addr), connection->peer_addr_len);
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
peer_devices++;
notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
}
mutex_unlock(¬ification_mutex);
- mutex_unlock(&adm_ctx.resource->conf_update);
+ mutex_unlock(&adm_ctx->resource->conf_update);
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
- drbd_adm_finish(&adm_ctx, info, rv);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
+ adm_ctx->reply_dh->ret_code = rv;
return 0;
fail:
kfree(new_net_conf);
conn_reconfig_done(connection);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct disconnect_parms parms;
struct drbd_connection *connection;
enum drbd_state_rv rv;
enum drbd_ret_code retcode;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto fail;
- connection = adm_ctx.connection;
+ connection = adm_ctx->connection;
memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
err = disconnect_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
if (rv < SS_SUCCESS) {
- drbd_adm_finish(&adm_ctx, info, rv);
+ adm_ctx->reply_dh->ret_code = rv;
return 0;
}
retcode = NO_ERROR;
fail:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
struct drbd_device *device;
sector_t u_size;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- device = adm_ctx.device;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ device = adm_ctx->device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto fail;
err = resize_parms_from_attrs(&rs, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail_ldev;
}
}
}
fail:
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
fail_ldev:
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto fail;
- res_opts = adm_ctx.resource->res_opts;
+ res_opts = adm_ctx->resource->res_opts;
if (should_set_defaults(info))
set_res_opts_defaults(&res_opts);
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto fail;
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
- err = set_resource_options(adm_ctx.resource, &res_opts);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ err = set_resource_options(adm_ctx->resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
retcode = ERR_NOMEM;
}
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
fail:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- device = adm_ctx.device;
+ device = adm_ctx->device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
drbd_resume_io(device);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
put_ldev(device);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
union drbd_state mask, union drbd_state val)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- retcode = drbd_request_state(adm_ctx.device, mask, val);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ retcode = drbd_request_state(adm_ctx->device, mask, val);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
int retcode; /* drbd_ret_code, drbd_state_rv */
struct drbd_device *device;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- device = adm_ctx.device;
+ device = adm_ctx->device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
goto out;
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(device);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
put_ldev(device);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ if (drbd_request_state(adm_ctx->device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
union drbd_dev_state s;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = adm_ctx.device->state;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ if (drbd_request_state(adm_ctx->device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx->device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
retcode = ERR_PAUSE_IS_CLEAR;
}
}
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- device = adm_ctx.device;
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ device = adm_ctx->device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
if (get_ldev_if_state(device, D_ATTACHING)) {
drbd_uuid_new_current(device);
tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(device);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
+ err = nla_put_status_info(adm_ctx->reply_skb, adm_ctx->device, NULL);
if (err) {
- nlmsg_free(adm_ctx.reply_skb);
+ nlmsg_free(adm_ctx->reply_skb);
+ adm_ctx->reply_skb = NULL;
return err;
}
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct timeout_parms tp;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
tp.timeout_type =
- adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
+ adm_ctx->device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx->device->flags) ? UT_DEGRADED :
UT_DEFAULT;
- err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
+ err = timeout_parms_to_priv_skb(adm_ctx->reply_skb, &tp);
if (err) {
- nlmsg_free(adm_ctx.reply_skb);
+ nlmsg_free(adm_ctx->reply_skb);
+ adm_ctx->reply_skb = NULL;
return err;
}
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_device *device;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- device = adm_ctx.device;
+ device = adm_ctx->device;
/* resume from last known position, if possible */
parms.ov_start_sector = device->ov_start_sector;
int err = start_ov_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto out;
}
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
/* w_make_ov_request expects position to be aligned */
device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
drbd_resume_io(device);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_device *device;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
struct new_c_uuid_parms args;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out_nolock;
- device = adm_ctx.device;
+ device = adm_ctx->device;
memset(&args, 0, sizeof(args));
if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
err = new_c_uuid_parms_from_attrs(&args, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto out_nolock;
}
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(device)) {
put_ldev(device);
out:
mutex_unlock(device->state_mutex);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out_nolock:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_connection *connection;
- struct drbd_config_context adm_ctx;
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
struct res_opts res_opts;
int err;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
- if (!adm_ctx.reply_skb)
- return retcode;
+ if (!adm_ctx->reply_skb)
+ return 0;
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
err = res_opts_from_attrs(&res_opts, info);
if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
- drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
+ drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
goto out;
}
- retcode = drbd_check_resource_name(&adm_ctx);
+ retcode = drbd_check_resource_name(adm_ctx);
if (retcode != NO_ERROR)
goto out;
- if (adm_ctx.resource) {
+ if (adm_ctx->resource) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST;
- drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
+ drbd_msg_put_info(adm_ctx->reply_skb, "resource exists");
}
/* else: still NO_ERROR */
goto out;
/* not yet safe for genl_family.parallel_ops */
mutex_lock(&resources_mutex);
- connection = conn_create(adm_ctx.resource_name, &res_opts);
+ connection = conn_create(adm_ctx->resource_name, &res_opts);
mutex_unlock(&resources_mutex);
if (connection) {
retcode = ERR_NOMEM;
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ /* adm_ctx is allocated and populated by drbd_pre_doit() and handed
+ * over via info->user_ptr[0]. */
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
- if (!adm_ctx.reply_skb)
- return retcode;
+ /* No reply skb: pre_doit could not set up a reply, nothing to report. */
+ if (!adm_ctx->reply_skb)
+ return 0;
+ /* Pick up the preparation status pre_doit stashed in the reply header. */
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
if (dh->minor > MINORMASK) {
- drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
+ drbd_msg_put_info(adm_ctx->reply_skb, "requested minor out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
- if (adm_ctx.volume > DRBD_VOLUME_MAX) {
- drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
+ if (adm_ctx->volume > DRBD_VOLUME_MAX) {
+ drbd_msg_put_info(adm_ctx->reply_skb, "requested volume id out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
}
- /* drbd_adm_prepare made sure already
- * that first_peer_device(device)->connection and device->vnr match the request. */
- if (adm_ctx.device) {
+ /* drbd_pre_doit() -> drbd_adm_prepare() made sure already
+ * that first_peer_device(device)->connection and device->vnr match the request. */
+ if (adm_ctx->device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
retcode = ERR_MINOR_OR_VOLUME_EXISTS;
/* else: still NO_ERROR */
goto out;
}
- mutex_lock(&adm_ctx.resource->adm_mutex);
- retcode = drbd_create_device(&adm_ctx, dh->minor);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ retcode = drbd_create_device(adm_ctx, dh->minor);
if (retcode == NO_ERROR) {
struct drbd_device *device;
struct drbd_peer_device *peer_device;
}
mutex_unlock(&notification_mutex);
}
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ /* Reply is finalized by drbd_post_doit(); just record the status here. */
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ /* adm_ctx is allocated and populated by drbd_pre_doit() and handed
+ * over via info->user_ptr[0]. */
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
- if (!adm_ctx.reply_skb)
- return retcode;
+ /* No reply skb: pre_doit could not set up a reply, nothing to report. */
+ if (!adm_ctx->reply_skb)
+ return 0;
+ /* Pick up the preparation status pre_doit stashed in the reply header. */
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto out;
- mutex_lock(&adm_ctx.resource->adm_mutex);
- retcode = adm_del_minor(adm_ctx.device);
- mutex_unlock(&adm_ctx.resource->adm_mutex);
+ mutex_lock(&adm_ctx->resource->adm_mutex);
+ retcode = adm_del_minor(adm_ctx->device);
+ mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ /* Reply is finalized by drbd_post_doit(); just record the status here. */
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ /* adm_ctx is allocated and populated by drbd_pre_doit() and handed
+ * over via info->user_ptr[0]. */
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_resource *resource;
struct drbd_connection *connection;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
unsigned i;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
- if (!adm_ctx.reply_skb)
- return retcode;
+ /* No reply skb: pre_doit could not set up a reply, nothing to report. */
+ if (!adm_ctx->reply_skb)
+ return 0;
+ /* Pick up the preparation status pre_doit stashed in the reply header. */
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- resource = adm_ctx.resource;
+ resource = adm_ctx->resource;
mutex_lock(&resource->adm_mutex);
/* demote */
for_each_connection(connection, resource) {
idr_for_each_entry(&connection->peer_devices, peer_device, i) {
retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
+ drbd_msg_put_info(adm_ctx->reply_skb, "failed to demote");
goto out;
}
}
retcode = conn_try_disconnect(connection, 0);
if (retcode < SS_SUCCESS) {
- drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
+ drbd_msg_put_info(adm_ctx->reply_skb, "failed to disconnect");
goto out;
}
}
idr_for_each_entry(&resource->devices, device, i) {
retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
- drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
+ drbd_msg_put_info(adm_ctx->reply_skb, "failed to detach");
goto out;
}
}
retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
- drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
+ drbd_msg_put_info(adm_ctx->reply_skb, "failed to delete volume");
goto out;
}
}
out:
mutex_unlock(&resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ /* Reply is finalized by drbd_post_doit(). Note retcode may carry either an
+ * enum drbd_ret_code or an enum drbd_state_rv here (see declaration above);
+ * that matches what drbd_adm_finish() received before this change. */
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
- struct drbd_config_context adm_ctx;
+ /* adm_ctx is allocated and populated by drbd_pre_doit() and handed
+ * over via info->user_ptr[0]. */
+ struct drbd_config_context *adm_ctx = info->user_ptr[0];
struct drbd_resource *resource;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
- if (!adm_ctx.reply_skb)
- return retcode;
+ /* No reply skb: pre_doit could not set up a reply, nothing to report. */
+ if (!adm_ctx->reply_skb)
+ return 0;
+ /* Pick up the preparation status pre_doit stashed in the reply header. */
+ retcode = adm_ctx->reply_dh->ret_code;
if (retcode != NO_ERROR)
goto finish;
- resource = adm_ctx.resource;
+ resource = adm_ctx->resource;
mutex_lock(&resource->adm_mutex);
retcode = adm_del_resource(resource);
mutex_unlock(&resource->adm_mutex);
finish:
- drbd_adm_finish(&adm_ctx, info, retcode);
+ /* Reply is finalized by drbd_post_doit(); just record the status here. */
+ adm_ctx->reply_dh->ret_code = retcode;
return 0;
}