// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 ****************************************************************************/

#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"

#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
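/*
 * For example, TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL) further below
 * expands to target_core_setup_dev_attrib_cit(), which wires the backend
 * driver's tb_dev_attrib_attrs[] array into the config_item_type
 * embedded in struct target_backend.
 */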

extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static unsigned int target_devices;
static DEFINE_MUTEX(target_devices_lock);
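/*
 * target_devices counts configured backend devices; db_root below may
 * only be changed while this count is zero.
 */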

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);

char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];

static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}

static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;
	ssize_t r = -EINVAL;

	mutex_lock(&target_devices_lock);
	if (target_devices) {
		pr_err("db_root: cannot be changed because it's in use\n");
		goto unlock;
	}

	if (count > (DB_ROOT_LEN - 1)) {
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		goto unlock;
	}

	read_bytes = scnprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes)
		goto unlock;

	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		goto unlock;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		goto unlock;
	}
	filp_close(fp, NULL);

	strscpy(db_root, db_root_stage);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

	r = read_bytes;

unlock:
	mutex_unlock(&target_devices_lock);
	return r;
}

CONFIGFS_ATTR(target_core_item_, dbroot);
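/*
 * Example usage, assuming configfs is mounted at /sys/kernel/config:
 *
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 *
 * The write is rejected once any device has been configured, and the
 * new path must name an existing directory.
 */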

static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		const char *cmp_name = tf->tf_ops->fabric_alias;
		if (!cmp_name)
			cmp_name = tf->tf_ops->fabric_name;
		if (!strcmp(cmp_name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}

/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
		" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can
		 * be registered, but simply provides auto loading logic for
		 * mkdir(2) calls naming known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
		 &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
				    &tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
		" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};
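/*
 * configfs invokes ->make_group() on mkdir(2) and ->drop_item() on
 * rmdir(2) beneath /sys/kernel/config/target/, so e.g.:
 *
 *	mkdir /sys/kernel/config/target/loopback -> target_core_register_fabric()
 *	rmdir /sys/kernel/config/target/loopback -> target_core_deregister_fabric()
 */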

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};

int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
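/*
 * target_depend_item()/target_undepend_item() pin a config_item so a
 * userspace rmdir(2) cannot remove it while the core still references
 * it, for example while persistent reservations reference a LUN mapping.
 */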

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_disable_feature(struct se_portal_group *se_tpg)
{
	return 0;
}

static u32 target_default_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 target_default_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static void target_set_default_node_attributes(struct se_node_acl *se_acl)
{
}

static int target_default_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (tfo->fabric_alias) {
		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
			pr_err("Passed alias: %s exceeds "
			       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
			return -EINVAL;
		}
	}
	if (!tfo->fabric_name) {
		pr_err("Missing tfo->fabric_name\n");
		return -EINVAL;
	}
	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds "
		       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	if (!tfo->check_stop_free) {
		pr_err("Missing tfo->check_stop_free()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in the
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}

static void target_set_default_ops(struct target_core_fabric_ops *tfo)
{
	if (!tfo->tpg_check_demo_mode)
		tfo->tpg_check_demo_mode = target_disable_feature;

	if (!tfo->tpg_check_demo_mode_cache)
		tfo->tpg_check_demo_mode_cache = target_disable_feature;

	if (!tfo->tpg_check_demo_mode_write_protect)
		tfo->tpg_check_demo_mode_write_protect = target_disable_feature;

	if (!tfo->tpg_check_prod_mode_write_protect)
		tfo->tpg_check_prod_mode_write_protect = target_disable_feature;

	if (!tfo->tpg_get_inst_index)
		tfo->tpg_get_inst_index = target_default_get_inst_index;

	if (!tfo->sess_get_index)
		tfo->sess_get_index = target_default_sess_get_index;

	if (!tfo->set_default_node_attributes)
		tfo->set_default_node_attributes = target_set_default_node_attributes;

	if (!tfo->get_cmd_state)
		tfo->get_cmd_state = target_default_get_cmd_state;
}

int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_core_fabric_ops *tfo;
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}
	tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL);
	if (!tfo) {
		kfree(tf);
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}
	memcpy(tfo, fo, sizeof(*tfo));
	target_set_default_ops(tfo);

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = tfo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
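/*
 * A fabric driver typically registers its template from module_init()
 * and unregisters it from module_exit().  A minimal sketch, using a
 * hypothetical fabric "xyz" (only a few of the mandatory callbacks
 * checked by target_fabric_tf_ops_check() are shown):
 *
 *	static const struct target_core_fabric_ops xyz_ops = {
 *		.module		= THIS_MODULE,
 *		.fabric_name	= "xyz",
 *		.tpg_get_wwn	= xyz_get_fabric_wwn,
 *		.tpg_get_tag	= xyz_get_tag,
 *		.release_cmd	= xyz_release_cmd,
 *		...
 *	};
 *
 *	ret = target_register_template(&xyz_ops);
 *	...
 *	target_unregister_template(&xyz_ops);
 */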

void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t->tf_tpg_base_cit.ct_attrs);
			kfree(t->tf_ops);
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);

/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}

/* Start functions for struct config_item_type tb_dev_attrib_cit */
#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
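/*
 * For example, DEF_CONFIGFS_ATTRIB_SHOW(block_size) expands to:
 *
 *	static ssize_t block_size_show(struct config_item *item, char *page)
 *	{
 *		return snprintf(page, PAGE_SIZE, "%u\n",
 *				to_attrib(item)->block_size);
 *	}
 */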

DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);

#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = val;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);

#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = kstrtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = flag;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);

#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
			configname);
	}
	/*
	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
	 * here without potentially breaking existing setups, so continue to
	 * truncate one byte shorter than what can be carried in INQUIRY.
	 */
	strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}

static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
		       " while export_count is %d\n",
		       dev, dev->export_count);
		return -EINVAL;
	}

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	if (flag)
		dev_set_t10_wwn_model_alias(dev);
	else
		strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod);
	da->emulate_model_alias = flag;
	return count;
}

static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		 da->da_dev, flag);
	return count;
}

static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != TARGET_UA_INTLCK_CTRL_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " UA_INTRLCK_CTRL while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		 da->da_dev, val);
	return count;
}

static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		 da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}

static int target_try_configure_unmap(struct se_device *dev,
				      const char *config_opt)
{
	if (!dev->transport->configure_unmap) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	if (!target_dev_configured(dev)) {
		pr_err("Generic Block Discard setup for %s requires device to be configured\n",
		       config_opt);
		return -ENODEV;
	}

	if (!dev->transport->configure_unmap(dev)) {
		pr_err("Generic Block Discard setup for %s failed\n",
		       config_opt);
		return -ENOSYS;
	}

	return 0;
}

static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		ret = target_try_configure_unmap(dev, "emulate_tpu");
		if (ret)
			return ret;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		 da->da_dev, flag);
	return count;
}

static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		ret = target_try_configure_unmap(dev, "emulate_tpws");
		if (ret)
			return ret;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		 da->da_dev, flag);
	return count;
}

static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}

/* always zero, but attr needs to remain RW to avoid userspace breakage */
static ssize_t pi_prot_format_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "0\n");
}

static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}

static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}

static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}

static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
		       " reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		 da->da_dev, flag);
	return count;
}

static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
		if (ret)
			return ret;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
		       " export_count is %d\n",
		       dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
			       " %u exceeds TCM/SE_Device MAX"
			       " TCQ: %u\n", dev, val,
			       dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}

static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " optimal_sectors while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
		       " greater than hw_max_sectors: %u\n",
		       da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		 da->da_dev, val);
	return count;
}

static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
		       " while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
		       " for SE device, must be 512, 1024, 2048 or 4096\n",
		       da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		 da->da_dev, val);
	return count;
}

static ssize_t alua_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}

static ssize_t alua_support_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag, oldflag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
	if (flag == oldflag)
		return count;

	if (!(dev->transport->transport_flags_changeable &
	      TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
		pr_err("dev[%p]: Unable to change SE Device alua_support:"
		       " alua_support has fixed value\n", dev);
		return -ENOSYS;
	}

	if (flag)
		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
	else
		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
	return count;
}

static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}

static ssize_t pgr_support_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag, oldflag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
	if (flag == oldflag)
		return count;

	if (!(dev->transport->transport_flags_changeable &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
		       " pgr_support has fixed value\n", dev);
		return -ENOSYS;
	}

	if (flag)
		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
	else
		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
	return count;
}

static ssize_t emulate_rsoc_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	da->emulate_rsoc = flag;
	pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
		 da->da_dev, flag);
	return count;
}

static ssize_t submit_type_store(struct config_item *item, const char *page,
				 size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int ret;
	u8 val;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > TARGET_QUEUE_SUBMIT)
		return -EINVAL;

	da->submit_type = val;
	return count;
}

CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, emulate_rsoc);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type);
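/*
 * CONFIGFS_ATTR(, _name) binds the _name##_show()/_name##_store()
 * handlers above into a struct configfs_attribute named attr_##_name
 * (CONFIGFS_ATTR_RO omits the store hook); the attribute arrays below
 * reference those attr_* objects.
 */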

/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	&attr_emulate_rsoc,
	&attr_submit_type,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);

/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	&attr_alua_support,
	&attr_pgr_support,
	&attr_submit_type,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);

/*
 * pr related dev_attrib attributes for devices passing through CDBs,
 * but allowing in core pr emulation.
 */
struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
	&attr_enforce_pr_isids,
	&attr_force_pr_aptpl,
	NULL,
};
EXPORT_SYMBOL(passthrough_pr_attrib_attrs);

TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
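/*
 * These two expansions generate target_core_setup_dev_attrib_cit() and
 * target_core_setup_dev_action_cit(), which pull their attribute arrays
 * from the backend's tb_dev_attrib_attrs/tb_dev_action_attrs (typically
 * one of the arrays exported above).
 */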

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */

static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}

static ssize_t target_check_inquiry_data(char *buf)
{
	size_t len;
	int i;

	len = strlen(buf);

	/*
	 * SPC 4.3.1:
	 * ASCII data fields shall contain only ASCII printable characters
	 * (i.e., code values 20h to 7Eh) and may be terminated with one or
	 * more ASCII null (00h) characters.
	 */
	for (i = 0; i < len; i++) {
		if (buf[i] < 0x20 || buf[i] > 0x7E) {
			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
			return -EINVAL;
		}
	}

	return len;
}

/*
 * STANDARD and VPD page 0x83 T10 Vendor Identification
 */
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}

static ssize_t target_wwn_vendor_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
	char *stripped = NULL;
	ssize_t len;
	ssize_t ret;

	len = strscpy(buf, page);
	if (len > 0) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len < 0 || len > INQUIRY_VENDOR_LEN) {
		pr_err("Emulated T10 Vendor Identification exceeds"
		       " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
		       "\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Vendor Identification while"
		       " active %d exports exist\n", dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
	strscpy(dev->t10_wwn.vendor, stripped);

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
		 " %s\n", dev->t10_wwn.vendor);

	return count;
}

static ssize_t target_wwn_product_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
}

static ssize_t target_wwn_product_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_MODEL_LEN + 2];
	char *stripped = NULL;
	ssize_t len;
	ssize_t ret;

	len = strscpy(buf, page);
	if (len > 0) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len < 0 || len > INQUIRY_MODEL_LEN) {
1497 | pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: " | |
1498 | __stringify(INQUIRY_MODEL_LEN) | |
1499 | "\n"); | |
1500 | return -EOVERFLOW; | |
1501 | } | |
1502 | ||
1503 | ret = target_check_inquiry_data(stripped); | |
1504 | ||
1505 | if (ret < 0) | |
1506 | return ret; | |
1507 | ||
1508 | /* | |
1509 | * Check to see if any active exports exist. If they do exist, fail | |
1510 | * here as changing this information on the fly (underneath the | |
1511 | * initiator side OS dependent multipath code) could cause negative | |
1512 | * effects. | |
1513 | */ | |
1514 | if (dev->export_count) { | |
1515 | pr_err("Unable to set T10 Model while active %d exports exist\n", | |
1516 | dev->export_count); | |
1517 | return -EINVAL; | |
1518 | } | |
1519 | ||
1520 | BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); | |
1521 | strscpy(dev->t10_wwn.model, stripped); | |
1522 | ||
1523 | pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", | |
1524 | dev->t10_wwn.model); | |
1525 | ||
1526 | return count; | |
1527 | } | |
1528 | ||
1529 | static ssize_t target_wwn_revision_show(struct config_item *item, | |
1530 | char *page) | |
1531 | { | |
1532 | return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]); | |
1533 | } | |
1534 | ||
1535 | static ssize_t target_wwn_revision_store(struct config_item *item, | |
1536 | const char *page, size_t count) | |
1537 | { | |
1538 | struct t10_wwn *t10_wwn = to_t10_wwn(item); | |
1539 | struct se_device *dev = t10_wwn->t10_dev; | |
1540 | /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ | |
1541 | unsigned char buf[INQUIRY_REVISION_LEN + 2]; | |
1542 | char *stripped = NULL; | |
1543 | ssize_t len; | |
1544 | ssize_t ret; | |
1545 | ||
1546 | len = strscpy(buf, page); | |
1547 | if (len > 0) { | |
1548 | /* Strip any newline added from userspace. */ | |
1549 | stripped = strstrip(buf); | |
1550 | len = strlen(stripped); | |
1551 | } | |
1552 | if (len < 0 || len > INQUIRY_REVISION_LEN) { | |
1553 | pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: " | |
1554 | __stringify(INQUIRY_REVISION_LEN) | |
1555 | "\n"); | |
1556 | return -EOVERFLOW; | |
1557 | } | |
1558 | ||
1559 | ret = target_check_inquiry_data(stripped); | |
1560 | ||
1561 | if (ret < 0) | |
1562 | return ret; | |
1563 | ||
1564 | /* | |
1565 | * Check to see if any active exports exist. If they do exist, fail | |
1566 | * here as changing this information on the fly (underneath the | |
1567 | * initiator side OS dependent multipath code) could cause negative | |
1568 | * effects. | |
1569 | */ | |
1570 | if (dev->export_count) { | |
1571 | pr_err("Unable to set T10 Revision while active %d exports exist\n", | |
1572 | dev->export_count); | |
1573 | return -EINVAL; | |
1574 | } | |
1575 | ||
1576 | BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); | |
1577 | strscpy(dev->t10_wwn.revision, stripped); | |
1578 | ||
1579 | pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", | |
1580 | dev->t10_wwn.revision); | |
1581 | ||
1582 | return count; | |
1583 | } | |
1584 | ||
1585 | static ssize_t | |
1586 | target_wwn_company_id_show(struct config_item *item, | |
1587 | char *page) | |
1588 | { | |
1589 | return snprintf(page, PAGE_SIZE, "%#08x\n", | |
1590 | to_t10_wwn(item)->company_id); | |
1591 | } | |
1592 | ||
1593 | static ssize_t | |
1594 | target_wwn_company_id_store(struct config_item *item, | |
1595 | const char *page, size_t count) | |
1596 | { | |
1597 | struct t10_wwn *t10_wwn = to_t10_wwn(item); | |
1598 | struct se_device *dev = t10_wwn->t10_dev; | |
1599 | u32 val; | |
1600 | int ret; | |
1601 | ||
1602 | /* | |
1603 | * The IEEE COMPANY_ID field should contain a 24-bit canonical | |
1604 | * form OUI assigned by the IEEE. | |
1605 | */ | |
1606 | ret = kstrtou32(page, 0, &val); | |
1607 | if (ret < 0) | |
1608 | return ret; | |
1609 | ||
1610 | if (val >= 0x1000000) | |
1611 | return -EOVERFLOW; | |
1612 | ||
1613 | /* | |
1614 | * Check to see if any active exports exist. If they do exist, fail | |
1615 | * here as changing this information on the fly (underneath the | |
1616 | * initiator side OS dependent multipath code) could cause negative | |
1617 | * effects. | |
1618 | */ | |
1619 | if (dev->export_count) { | |
1620 | pr_err("Unable to set Company ID while %u exports exist\n", | |
1621 | dev->export_count); | |
1622 | return -EINVAL; | |
1623 | } | |
1624 | ||
1625 | t10_wwn->company_id = val; | |
1626 | ||
1627 | pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n", | |
1628 | t10_wwn->company_id); | |
1629 | ||
1630 | return count; | |
1631 | } | |
1632 | ||
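/*
 * Example (the OUI below is made up): set the 24-bit IEEE company
 * identifier used when building emulated NAA-format designators:
 *
 *   echo 0x001405 > $TARGET/$HBA/$STORAGE_OBJECT/wwn/company_id
 *
 * kstrtou32() with base 0 also accepts decimal and leading-zero octal;
 * anything that does not fit in 24 bits fails with -EOVERFLOW.
 */
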
1633 | /* | |
1634 | * VPD page 0x80 Unit serial | |
1635 | */ | |
1636 | static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item, | |
1637 | char *page) | |
1638 | { | |
1639 | return sprintf(page, "T10 VPD Unit Serial Number: %s\n", | |
1640 | &to_t10_wwn(item)->unit_serial[0]); | |
1641 | } | |
1642 | ||
1643 | static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, | |
1644 | const char *page, size_t count) | |
1645 | { | |
1646 | struct t10_wwn *t10_wwn = to_t10_wwn(item); | |
1647 | struct se_device *dev = t10_wwn->t10_dev; | |
1648 | unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { }; | |
1649 | ||
1650 | /* | |
1651 | * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial | |
1652 | * from the struct scsi_device level firmware, do not allow | |
1653 | * VPD Unit Serial to be emulated. | |
1654 | * | |
1655 | * Note this struct scsi_device could also be emulating VPD | |
1656 | * information from its drivers/scsi LLD. But for now we assume | |
1657 | * it is doing 'the right thing' wrt a world wide unique | |
1658 | * VPD Unit Serial Number that OS dependent multipath can depend on. | |
1659 | */ | |
1660 | if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) { | |
1661 | pr_err("Underlying SCSI device firmware provided VPD" | |
1662 | " Unit Serial, ignoring request\n"); | |
1663 | return -EOPNOTSUPP; | |
1664 | } | |
1665 | ||
1666 | if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { | |
1667 | pr_err("Emulated VPD Unit Serial exceeds" | |
1668 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); | |
1669 | return -EOVERFLOW; | |
1670 | } | |
1671 | /* | |
1672 | * Check to see if any active $FABRIC_MOD exports exist. If they | |
1673 | * do exist, fail here as changing this information on the fly | |
1674 | * (underneath the initiator side OS dependent multipath code) | |
1675 | * could cause negative effects. | |
1676 | */ | |
1677 | if (dev->export_count) { | |
1678 | pr_err("Unable to set VPD Unit Serial while" | |
1679 | " active %d $FABRIC_MOD exports exist\n", | |
1680 | dev->export_count); | |
1681 | return -EINVAL; | |
1682 | } | |
1683 | ||
1684 | /* | |
1685 | * This currently assumes ASCII encoding for emulated VPD Unit Serial. | |
1686 | * | |
1687 | * Also, strip any newline added from the userspace | |
1688 | * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial | |
1689 | */ | |
1690 | snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); | |
1691 | snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, | |
1692 | "%s", strstrip(buf)); | |
1693 | dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL; | |
1694 | ||
1695 | pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" | |
1696 | " %s\n", dev->t10_wwn.unit_serial); | |
1697 | ||
1698 | return count; | |
1699 | } | |
1700 | ||
1701 | /* | |
1702 | * VPD page 0x83 Protocol Identifier | |
1703 | */ | |
1704 | static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item, | |
1705 | char *page) | |
1706 | { | |
1707 | struct t10_wwn *t10_wwn = to_t10_wwn(item); | |
1708 | struct t10_vpd *vpd; | |
1709 | unsigned char buf[VPD_TMP_BUF_SIZE] = { }; | |
1710 | ssize_t len = 0; | |
1711 | ||
1712 | spin_lock(&t10_wwn->t10_vpd_lock); | |
1713 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { | |
1714 | if (!vpd->protocol_identifier_set) | |
1715 | continue; | |
1716 | ||
1717 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); | |
1718 | ||
1719 | if (len + strlen(buf) >= PAGE_SIZE) | |
1720 | break; | |
1721 | ||
1722 | len += sprintf(page+len, "%s", buf); | |
1723 | } | |
1724 | spin_unlock(&t10_wwn->t10_vpd_lock); | |
1725 | ||
1726 | return len; | |
1727 | } | |
1728 | ||
1729 | /* | |
1730 | * Generic wrapper for dumping VPD identifiers by association. | |
1731 | */ | |
1732 | #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ | |
1733 | static ssize_t target_wwn_##_name##_show(struct config_item *item, \ | |
1734 | char *page) \ | |
1735 | { \ | |
1736 | struct t10_wwn *t10_wwn = to_t10_wwn(item); \ | |
1737 | struct t10_vpd *vpd; \ | |
1738 | unsigned char buf[VPD_TMP_BUF_SIZE]; \ | |
1739 | ssize_t len = 0; \ | |
1740 | \ | |
1741 | spin_lock(&t10_wwn->t10_vpd_lock); \ | |
1742 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ | |
1743 | if (vpd->association != _assoc) \ | |
1744 | continue; \ | |
1745 | \ | |
1746 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | |
1747 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ | |
1748 | if (len + strlen(buf) >= PAGE_SIZE) \ | |
1749 | break; \ | |
1750 | len += sprintf(page+len, "%s", buf); \ | |
1751 | \ | |
1752 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | |
1753 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ | |
1754 | if (len + strlen(buf) >= PAGE_SIZE) \ | |
1755 | break; \ | |
1756 | len += sprintf(page+len, "%s", buf); \ | |
1757 | \ | |
1758 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | |
1759 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ | |
1760 | if (len + strlen(buf) >= PAGE_SIZE) \ | |
1761 | break; \ | |
1762 | len += sprintf(page+len, "%s", buf); \ | |
1763 | } \ | |
1764 | spin_unlock(&t10_wwn->t10_vpd_lock); \ | |
1765 | \ | |
1766 | return len; \ | |
1767 | } | |
1768 | ||
1769 | /* VPD page 0x83 Association: Logical Unit */ | |
1770 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); | |
1771 | /* VPD page 0x83 Association: Target Port */ | |
1772 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); | |
1773 | /* VPD page 0x83 Association: SCSI Target Device */ | |
1774 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); | |
1775 | ||
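/*
 * Each DEF_DEV_WWN_ASSOC_SHOW() line above stamps out one read-only
 * show routine filtered on the VPD page 0x83 association value; the
 * first expands to (sketch):
 *
 *   static ssize_t target_wwn_vpd_assoc_logical_unit_show(
 *                  struct config_item *item, char *page);
 *
 * which CONFIGFS_ATTR_RO() below publishes as the
 * wwn/vpd_assoc_logical_unit attribute file.
 */
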
1776 | CONFIGFS_ATTR(target_wwn_, vendor_id); | |
1777 | CONFIGFS_ATTR(target_wwn_, product_id); | |
1778 | CONFIGFS_ATTR(target_wwn_, revision); | |
1779 | CONFIGFS_ATTR(target_wwn_, company_id); | |
1780 | CONFIGFS_ATTR(target_wwn_, vpd_unit_serial); | |
1781 | CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier); | |
1782 | CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); | |
1783 | CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port); | |
1784 | CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); | |
1785 | ||
1786 | static struct configfs_attribute *target_core_dev_wwn_attrs[] = { | |
1787 | &target_wwn_attr_vendor_id, | |
1788 | &target_wwn_attr_product_id, | |
1789 | &target_wwn_attr_revision, | |
1790 | &target_wwn_attr_company_id, | |
1791 | &target_wwn_attr_vpd_unit_serial, | |
1792 | &target_wwn_attr_vpd_protocol_identifier, | |
1793 | &target_wwn_attr_vpd_assoc_logical_unit, | |
1794 | &target_wwn_attr_vpd_assoc_target_port, | |
1795 | &target_wwn_attr_vpd_assoc_scsi_target_device, | |
1796 | NULL, | |
1797 | }; | |
1798 | ||
1799 | TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs); | |
1800 | ||
1801 | /* End functions for struct config_item_type tb_dev_wwn_cit */ | |
1802 | ||
1803 | /* Start functions for struct config_item_type tb_dev_pr_cit */ | |
1804 | ||
1805 | static struct se_device *pr_to_dev(struct config_item *item) | |
1806 | { | |
1807 | return container_of(to_config_group(item), struct se_device, | |
1808 | dev_pr_group); | |
1809 | } | |
1810 | ||
1811 | static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, | |
1812 | char *page) | |
1813 | { | |
1814 | struct se_node_acl *se_nacl; | |
1815 | struct t10_pr_registration *pr_reg; | |
1816 | char i_buf[PR_REG_ISID_ID_LEN] = { }; | |
1817 | ||
1818 | pr_reg = dev->dev_pr_res_holder; | |
1819 | if (!pr_reg) | |
1820 | return sprintf(page, "No SPC-3 Reservation holder\n"); | |
1821 | ||
1822 | se_nacl = pr_reg->pr_reg_nacl; | |
1823 | core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); | |
1824 | ||
1825 | return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", | |
1826 | se_nacl->se_tpg->se_tpg_tfo->fabric_name, | |
1827 | se_nacl->initiatorname, i_buf); | |
1828 | } | |
1829 | ||
1830 | static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, | |
1831 | char *page) | |
1832 | { | |
1833 | struct se_session *sess = dev->reservation_holder; | |
1834 | struct se_node_acl *se_nacl; | |
1835 | ssize_t len; | |
1836 | ||
1837 | if (sess) { | |
1838 | se_nacl = sess->se_node_acl; | |
1839 | len = sprintf(page, | |
1840 | "SPC-2 Reservation: %s Initiator: %s\n", | |
1841 | se_nacl->se_tpg->se_tpg_tfo->fabric_name, | |
1842 | se_nacl->initiatorname); | |
1843 | } else { | |
1844 | len = sprintf(page, "No SPC-2 Reservation holder\n"); | |
1845 | } | |
1846 | return len; | |
1847 | } | |
1848 | ||
1849 | static ssize_t target_pr_res_holder_show(struct config_item *item, char *page) | |
1850 | { | |
1851 | struct se_device *dev = pr_to_dev(item); | |
1852 | int ret; | |
1853 | ||
1854 | if (!dev->dev_attrib.emulate_pr) | |
1855 | return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); | |
1856 | ||
1857 | if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) | |
1858 | return sprintf(page, "Passthrough\n"); | |
1859 | ||
1860 | spin_lock(&dev->dev_reservation_lock); | |
1861 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | |
1862 | ret = target_core_dev_pr_show_spc2_res(dev, page); | |
1863 | else | |
1864 | ret = target_core_dev_pr_show_spc3_res(dev, page); | |
1865 | spin_unlock(&dev->dev_reservation_lock); | |
1866 | return ret; | |
1867 | } | |
1868 | ||
1869 | static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item, | |
1870 | char *page) | |
1871 | { | |
1872 | struct se_device *dev = pr_to_dev(item); | |
1873 | ssize_t len = 0; | |
1874 | ||
1875 | spin_lock(&dev->dev_reservation_lock); | |
1876 | if (!dev->dev_pr_res_holder) { | |
1877 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | |
1878 | } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) { | |
1879 | len = sprintf(page, "SPC-3 Reservation: All Target" | |
1880 | " Ports registration\n"); | |
1881 | } else { | |
1882 | len = sprintf(page, "SPC-3 Reservation: Single" | |
1883 | " Target Port registration\n"); | |
1884 | } | |
1885 | ||
1886 | spin_unlock(&dev->dev_reservation_lock); | |
1887 | return len; | |
1888 | } | |
1889 | ||
1890 | static ssize_t target_pr_res_pr_generation_show(struct config_item *item, | |
1891 | char *page) | |
1892 | { | |
1893 | return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation); | |
1894 | } | |
1895 | ||
1897 | static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item, | |
1898 | char *page) | |
1899 | { | |
1900 | struct se_device *dev = pr_to_dev(item); | |
1901 | struct se_node_acl *se_nacl; | |
1902 | struct se_portal_group *se_tpg; | |
1903 | struct t10_pr_registration *pr_reg; | |
1904 | const struct target_core_fabric_ops *tfo; | |
1905 | ssize_t len = 0; | |
1906 | ||
1907 | spin_lock(&dev->dev_reservation_lock); | |
1908 | pr_reg = dev->dev_pr_res_holder; | |
1909 | if (!pr_reg) { | |
1910 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | |
1911 | goto out_unlock; | |
1912 | } | |
1913 | ||
1914 | se_nacl = pr_reg->pr_reg_nacl; | |
1915 | se_tpg = se_nacl->se_tpg; | |
1916 | tfo = se_tpg->se_tpg_tfo; | |
1917 | ||
1918 | len += sprintf(page+len, "SPC-3 Reservation: %s" | |
1919 | " Target Node Endpoint: %s\n", tfo->fabric_name, | |
1920 | tfo->tpg_get_wwn(se_tpg)); | |
1921 | len += sprintf(page+len, "SPC-3 Reservation: Relative Port" | |
1922 | " Identifier Tag: %hu %s Portal Group Tag: %hu" | |
1923 | " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, | |
1924 | tfo->fabric_name, tfo->tpg_get_tag(se_tpg), | |
1925 | tfo->fabric_name, pr_reg->pr_aptpl_target_lun); | |
1926 | ||
1927 | out_unlock: | |
1928 | spin_unlock(&dev->dev_reservation_lock); | |
1929 | return len; | |
1930 | } | |
1931 | ||
1933 | static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item, | |
1934 | char *page) | |
1935 | { | |
1936 | struct se_device *dev = pr_to_dev(item); | |
1937 | const struct target_core_fabric_ops *tfo; | |
1938 | struct t10_pr_registration *pr_reg; | |
1939 | unsigned char buf[384]; | |
1940 | char i_buf[PR_REG_ISID_ID_LEN]; | |
1941 | ssize_t len = 0; | |
1942 | int reg_count = 0; | |
1943 | ||
1944 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); | |
1945 | ||
1946 | spin_lock(&dev->t10_pr.registration_lock); | |
1947 | list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, | |
1948 | pr_reg_list) { | |
1949 | ||
1950 | memset(buf, 0, 384); | |
1951 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | |
1952 | tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | |
1953 | core_pr_dump_initiator_port(pr_reg, i_buf, | |
1954 | PR_REG_ISID_ID_LEN); | |
1955 | sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", | |
1956 | tfo->fabric_name, | |
1957 | pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, | |
1958 | pr_reg->pr_res_generation); | |
1959 | ||
1960 | if (len + strlen(buf) >= PAGE_SIZE) | |
1961 | break; | |
1962 | ||
1963 | len += sprintf(page+len, "%s", buf); | |
1964 | reg_count++; | |
1965 | } | |
1966 | spin_unlock(&dev->t10_pr.registration_lock); | |
1967 | ||
1968 | if (!reg_count) | |
1969 | len += sprintf(page+len, "None\n"); | |
1970 | ||
1971 | return len; | |
1972 | } | |
1973 | ||
1974 | static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page) | |
1975 | { | |
1976 | struct se_device *dev = pr_to_dev(item); | |
1977 | struct t10_pr_registration *pr_reg; | |
1978 | ssize_t len = 0; | |
1979 | ||
1980 | spin_lock(&dev->dev_reservation_lock); | |
1981 | pr_reg = dev->dev_pr_res_holder; | |
1982 | if (pr_reg) { | |
1983 | len = sprintf(page, "SPC-3 Reservation Type: %s\n", | |
1984 | core_scsi3_pr_dump_type(pr_reg->pr_res_type)); | |
1985 | } else { | |
1986 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | |
1987 | } | |
1988 | ||
1989 | spin_unlock(&dev->dev_reservation_lock); | |
1990 | return len; | |
1991 | } | |
1992 | ||
1993 | static ssize_t target_pr_res_type_show(struct config_item *item, char *page) | |
1994 | { | |
1995 | struct se_device *dev = pr_to_dev(item); | |
1996 | ||
1997 | if (!dev->dev_attrib.emulate_pr) | |
1998 | return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); | |
1999 | if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) | |
2000 | return sprintf(page, "SPC_PASSTHROUGH\n"); | |
2001 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | |
2002 | return sprintf(page, "SPC2_RESERVATIONS\n"); | |
2003 | ||
2004 | return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); | |
2005 | } | |
2006 | ||
2007 | static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, | |
2008 | char *page) | |
2009 | { | |
2010 | struct se_device *dev = pr_to_dev(item); | |
2011 | ||
2012 | if (!dev->dev_attrib.emulate_pr || | |
2013 | (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) | |
2014 | return 0; | |
2015 | ||
2016 | return sprintf(page, "APTPL Bit Status: %s\n", | |
2017 | (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); | |
2018 | } | |
2019 | ||
2020 | static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, | |
2021 | char *page) | |
2022 | { | |
2023 | struct se_device *dev = pr_to_dev(item); | |
2024 | ||
2025 | if (!dev->dev_attrib.emulate_pr || | |
2026 | (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) | |
2027 | return 0; | |
2028 | ||
2029 | return sprintf(page, "Ready to process PR APTPL metadata...\n"); | |
2030 | } | |
2031 | ||
2032 | enum { | |
2033 | Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, | |
2034 | Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, | |
2035 | Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, | |
2036 | Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err | |
2037 | }; | |
2038 | ||
2039 | static match_table_t tokens = { | |
2040 | {Opt_initiator_fabric, "initiator_fabric=%s"}, | |
2041 | {Opt_initiator_node, "initiator_node=%s"}, | |
2042 | {Opt_initiator_sid, "initiator_sid=%s"}, | |
2043 | {Opt_sa_res_key, "sa_res_key=%s"}, | |
2044 | {Opt_res_holder, "res_holder=%d"}, | |
2045 | {Opt_res_type, "res_type=%d"}, | |
2046 | {Opt_res_scope, "res_scope=%d"}, | |
2047 | {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, | |
2048 | {Opt_mapped_lun, "mapped_lun=%u"}, | |
2049 | {Opt_target_fabric, "target_fabric=%s"}, | |
2050 | {Opt_target_node, "target_node=%s"}, | |
2051 | {Opt_tpgt, "tpgt=%d"}, | |
2052 | {Opt_port_rtpi, "port_rtpi=%d"}, | |
2053 | {Opt_target_lun, "target_lun=%u"}, | |
2054 | {Opt_err, NULL} | |
2055 | }; | |
2056 | ||
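/*
 * One APTPL registration is written as a single comma-separated line of
 * the tokens above (strsep() below also treats '\n' as a separator).
 * All identifiers in this example are made up:
 *
 *   echo "initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:i1,\
 *   sa_res_key=0x1234abcd,res_holder=1,res_type=3,mapped_lun=0,\
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.example:t1,tpgt=1,\
 *   target_lun=0" > $TARGET/$HBA/$STORAGE_OBJECT/pr/res_aptpl_metadata
 *
 * initiator_node=, target_node= and a non-zero sa_res_key= are
 * mandatory, and res_type= must be supplied whenever res_holder=1.
 */
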
2057 | static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, | |
2058 | const char *page, size_t count) | |
2059 | { | |
2060 | struct se_device *dev = pr_to_dev(item); | |
2061 | unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; | |
2062 | unsigned char *t_fabric = NULL, *t_port = NULL; | |
2063 | char *orig, *ptr, *opts; | |
2064 | substring_t args[MAX_OPT_ARGS]; | |
2065 | unsigned long long tmp_ll; | |
2066 | u64 sa_res_key = 0; | |
2067 | u64 mapped_lun = 0, target_lun = 0; | |
2068 | int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; | |
2069 | u16 tpgt = 0; | |
2070 | u8 type = 0; | |
2071 | ||
2072 | if (!dev->dev_attrib.emulate_pr || | |
2073 | (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) | |
2074 | return count; | |
2075 | if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) | |
2076 | return count; | |
2077 | ||
2078 | if (dev->export_count) { | |
2079 | pr_debug("Unable to process APTPL metadata while" | |
2080 | " active fabric exports exist\n"); | |
2081 | return -EINVAL; | |
2082 | } | |
2083 | ||
2084 | opts = kstrdup(page, GFP_KERNEL); | |
2085 | if (!opts) | |
2086 | return -ENOMEM; | |
2087 | ||
2088 | orig = opts; | |
2089 | while ((ptr = strsep(&opts, ",\n")) != NULL) { | |
2090 | if (!*ptr) | |
2091 | continue; | |
2092 | ||
2093 | token = match_token(ptr, tokens, args); | |
2094 | switch (token) { | |
2095 | case Opt_initiator_fabric: | |
2096 | i_fabric = match_strdup(args); | |
2097 | if (!i_fabric) { | |
2098 | ret = -ENOMEM; | |
2099 | goto out; | |
2100 | } | |
2101 | break; | |
2102 | case Opt_initiator_node: | |
2103 | i_port = match_strdup(args); | |
2104 | if (!i_port) { | |
2105 | ret = -ENOMEM; | |
2106 | goto out; | |
2107 | } | |
2108 | if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { | |
2109 | pr_err("APTPL metadata initiator_node=" | |
2110 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | |
2111 | PR_APTPL_MAX_IPORT_LEN); | |
2112 | ret = -EINVAL; | |
2113 | break; | |
2114 | } | |
2115 | break; | |
2116 | case Opt_initiator_sid: | |
2117 | isid = match_strdup(args); | |
2118 | if (!isid) { | |
2119 | ret = -ENOMEM; | |
2120 | goto out; | |
2121 | } | |
2122 | if (strlen(isid) >= PR_REG_ISID_LEN) { | |
2123 | pr_err("APTPL metadata initiator_sid" | |
2124 | "= exceeds PR_REG_ISID_LEN: %d\n", | |
2125 | PR_REG_ISID_LEN); | |
2126 | ret = -EINVAL; | |
2127 | break; | |
2128 | } | |
2129 | break; | |
2130 | case Opt_sa_res_key: | |
2131 | ret = match_u64(args, &tmp_ll); | |
2132 | if (ret < 0) { | |
2133 | pr_err("match_u64() failed for sa_res_key=\n"); | |
2134 | goto out; | |
2135 | } | |
2136 | sa_res_key = (u64)tmp_ll; | |
2137 | break; | |
2138 | /* | |
2139 | * PR APTPL Metadata for Reservation | |
2140 | */ | |
2141 | case Opt_res_holder: | |
2142 | ret = match_int(args, &arg); | |
2143 | if (ret) | |
2144 | goto out; | |
2145 | res_holder = arg; | |
2146 | break; | |
2147 | case Opt_res_type: | |
2148 | ret = match_int(args, &arg); | |
2149 | if (ret) | |
2150 | goto out; | |
2151 | type = (u8)arg; | |
2152 | break; | |
2153 | case Opt_res_scope: | |
2154 | ret = match_int(args, &arg); | |
2155 | if (ret) | |
2156 | goto out; | |
2157 | break; | |
2158 | case Opt_res_all_tg_pt: | |
2159 | ret = match_int(args, &arg); | |
2160 | if (ret) | |
2161 | goto out; | |
2162 | all_tg_pt = (int)arg; | |
2163 | break; | |
2164 | case Opt_mapped_lun: | |
2165 | ret = match_u64(args, &tmp_ll); | |
2166 | if (ret) | |
2167 | goto out; | |
2168 | mapped_lun = (u64)tmp_ll; | |
2169 | break; | |
2170 | /* | |
2171 | * PR APTPL Metadata for Target Port | |
2172 | */ | |
2173 | case Opt_target_fabric: | |
2174 | t_fabric = match_strdup(args); | |
2175 | if (!t_fabric) { | |
2176 | ret = -ENOMEM; | |
2177 | goto out; | |
2178 | } | |
2179 | break; | |
2180 | case Opt_target_node: | |
2181 | t_port = match_strdup(args); | |
2182 | if (!t_port) { | |
2183 | ret = -ENOMEM; | |
2184 | goto out; | |
2185 | } | |
2186 | if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { | |
2187 | pr_err("APTPL metadata target_node=" | |
2188 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | |
2189 | PR_APTPL_MAX_TPORT_LEN); | |
2190 | ret = -EINVAL; | |
2191 | break; | |
2192 | } | |
2193 | break; | |
2194 | case Opt_tpgt: | |
2195 | ret = match_int(args, &arg); | |
2196 | if (ret) | |
2197 | goto out; | |
2198 | tpgt = (u16)arg; | |
2199 | break; | |
2200 | case Opt_port_rtpi: | |
2201 | ret = match_int(args, &arg); | |
2202 | if (ret) | |
2203 | goto out; | |
2204 | break; | |
2205 | case Opt_target_lun: | |
2206 | ret = match_u64(args, &tmp_ll); | |
2207 | if (ret) | |
2208 | goto out; | |
2209 | target_lun = (u64)tmp_ll; | |
2210 | break; | |
2211 | default: | |
2212 | break; | |
2213 | } | |
2214 | } | |
2215 | ||
2216 | if (!i_port || !t_port || !sa_res_key) { | |
2217 | pr_err("Illegal parameters for APTPL registration\n"); | |
2218 | ret = -EINVAL; | |
2219 | goto out; | |
2220 | } | |
2221 | ||
2222 | if (res_holder && !(type)) { | |
2223 | pr_err("Illegal PR type: 0x%02x for reservation" | |
2224 | " holder\n", type); | |
2225 | ret = -EINVAL; | |
2226 | goto out; | |
2227 | } | |
2228 | ||
2229 | ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, | |
2230 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | |
2231 | res_holder, all_tg_pt, type); | |
2232 | out: | |
2233 | kfree(i_fabric); | |
2234 | kfree(i_port); | |
2235 | kfree(isid); | |
2236 | kfree(t_fabric); | |
2237 | kfree(t_port); | |
2238 | kfree(orig); | |
2239 | return (ret == 0) ? count : ret; | |
2240 | } | |
2241 | ||
2243 | CONFIGFS_ATTR_RO(target_pr_, res_holder); | |
2244 | CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts); | |
2245 | CONFIGFS_ATTR_RO(target_pr_, res_pr_generation); | |
2246 | CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port); | |
2247 | CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts); | |
2248 | CONFIGFS_ATTR_RO(target_pr_, res_pr_type); | |
2249 | CONFIGFS_ATTR_RO(target_pr_, res_type); | |
2250 | CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active); | |
2251 | CONFIGFS_ATTR(target_pr_, res_aptpl_metadata); | |
2252 | ||
2253 | static struct configfs_attribute *target_core_dev_pr_attrs[] = { | |
2254 | &target_pr_attr_res_holder, | |
2255 | &target_pr_attr_res_pr_all_tgt_pts, | |
2256 | &target_pr_attr_res_pr_generation, | |
2257 | &target_pr_attr_res_pr_holder_tg_port, | |
2258 | &target_pr_attr_res_pr_registered_i_pts, | |
2259 | &target_pr_attr_res_pr_type, | |
2260 | &target_pr_attr_res_type, | |
2261 | &target_pr_attr_res_aptpl_active, | |
2262 | &target_pr_attr_res_aptpl_metadata, | |
2263 | NULL, | |
2264 | }; | |
2265 | ||
2266 | TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs); | |
2267 | ||
2268 | /* End functions for struct config_item_type tb_dev_pr_cit */ | |
2269 | ||
2270 | /* Start functions for struct config_item_type tb_dev_cit */ | |
2271 | ||
2272 | static inline struct se_device *to_device(struct config_item *item) | |
2273 | { | |
2274 | return container_of(to_config_group(item), struct se_device, dev_group); | |
2275 | } | |
2276 | ||
2277 | static ssize_t target_dev_info_show(struct config_item *item, char *page) | |
2278 | { | |
2279 | struct se_device *dev = to_device(item); | |
2280 | int bl = 0; | |
2281 | ssize_t read_bytes = 0; | |
2282 | ||
2283 | transport_dump_dev_state(dev, page, &bl); | |
2284 | read_bytes += bl; | |
2285 | read_bytes += dev->transport->show_configfs_dev_params(dev, | |
2286 | page+read_bytes); | |
2287 | return read_bytes; | |
2288 | } | |
2289 | ||
2290 | static ssize_t target_dev_control_store(struct config_item *item, | |
2291 | const char *page, size_t count) | |
2292 | { | |
2293 | struct se_device *dev = to_device(item); | |
2294 | ||
2295 | return dev->transport->set_configfs_dev_params(dev, page, count); | |
2296 | } | |
2297 | ||
2298 | static ssize_t target_dev_alias_show(struct config_item *item, char *page) | |
2299 | { | |
2300 | struct se_device *dev = to_device(item); | |
2301 | ||
2302 | if (!(dev->dev_flags & DF_USING_ALIAS)) | |
2303 | return 0; | |
2304 | ||
2305 | return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); | |
2306 | } | |
2307 | ||
2308 | static ssize_t target_dev_alias_store(struct config_item *item, | |
2309 | const char *page, size_t count) | |
2310 | { | |
2311 | struct se_device *dev = to_device(item); | |
2312 | struct se_hba *hba = dev->se_hba; | |
2313 | ssize_t read_bytes; | |
2314 | ||
2315 | if (count > (SE_DEV_ALIAS_LEN-1)) { | |
2316 | pr_err("alias count: %d exceeds" | |
2317 | " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, | |
2318 | SE_DEV_ALIAS_LEN-1); | |
2319 | return -EINVAL; | |
2320 | } | |
2321 | ||
2322 | read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); | |
2323 | if (!read_bytes) | |
2324 | return -EINVAL; | |
2325 | if (dev->dev_alias[read_bytes - 1] == '\n') | |
2326 | dev->dev_alias[read_bytes - 1] = '\0'; | |
2327 | ||
2328 | dev->dev_flags |= DF_USING_ALIAS; | |
2329 | ||
2330 | pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", | |
2331 | config_item_name(&hba->hba_group.cg_item), | |
2332 | config_item_name(&dev->dev_group.cg_item), | |
2333 | dev->dev_alias); | |
2334 | ||
2335 | return read_bytes; | |
2336 | } | |
2337 | ||
2338 | static ssize_t target_dev_udev_path_show(struct config_item *item, char *page) | |
2339 | { | |
2340 | struct se_device *dev = to_device(item); | |
2341 | ||
2342 | if (!(dev->dev_flags & DF_USING_UDEV_PATH)) | |
2343 | return 0; | |
2344 | ||
2345 | return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); | |
2346 | } | |
2347 | ||
2348 | static ssize_t target_dev_udev_path_store(struct config_item *item, | |
2349 | const char *page, size_t count) | |
2350 | { | |
2351 | struct se_device *dev = to_device(item); | |
2352 | struct se_hba *hba = dev->se_hba; | |
2353 | ssize_t read_bytes; | |
2354 | ||
2355 | if (count > (SE_UDEV_PATH_LEN-1)) { | |
2356 | pr_err("udev_path count: %d exceeds" | |
2357 | " SE_UDEV_PATH_LEN-1: %u\n", (int)count, | |
2358 | SE_UDEV_PATH_LEN-1); | |
2359 | return -EINVAL; | |
2360 | } | |
2361 | ||
2362 | read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, | |
2363 | "%s", page); | |
2364 | if (!read_bytes) | |
2365 | return -EINVAL; | |
2366 | if (dev->udev_path[read_bytes - 1] == '\n') | |
2367 | dev->udev_path[read_bytes - 1] = '\0'; | |
2368 | ||
2369 | dev->dev_flags |= DF_USING_UDEV_PATH; | |
2370 | ||
2371 | pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", | |
2372 | config_item_name(&hba->hba_group.cg_item), | |
2373 | config_item_name(&dev->dev_group.cg_item), | |
2374 | dev->udev_path); | |
2375 | ||
2376 | return read_bytes; | |
2377 | } | |
2378 | ||
2379 | static ssize_t target_dev_enable_show(struct config_item *item, char *page) | |
2380 | { | |
2381 | struct se_device *dev = to_device(item); | |
2382 | ||
2383 | return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev)); | |
2384 | } | |
2385 | ||
2386 | static ssize_t target_dev_enable_store(struct config_item *item, | |
2387 | const char *page, size_t count) | |
2388 | { | |
2389 | struct se_device *dev = to_device(item); | |
2390 | char *ptr; | |
2391 | int ret; | |
2392 | ||
2393 | ptr = strstr(page, "1"); | |
2394 | if (!ptr) { | |
2395 | pr_err("For dev_enable ops, the only valid value" | |
2396 | " is \"1\"\n"); | |
2397 | return -EINVAL; | |
2398 | } | |
2399 | ||
2400 | ret = target_configure_device(dev); | |
2401 | if (ret) | |
2402 | return ret; | |
2403 | return count; | |
2404 | } | |
2405 | ||
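/*
 * Typical bring-up sequence (the fd_* parameters below belong to the
 * FILEIO backend and are only an illustration; each backend defines
 * its own control keywords):
 *
 *   echo fd_dev_name=/tmp/backing.img,fd_dev_size=1073741824 > \
 *           $TARGET/$HBA/$STORAGE_OBJECT/control
 *   echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/enable
 *
 * Note the store above only scans the buffer for a '1' via strstr();
 * there is no way to disable a configured device through this file.
 */
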
2406 | static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page) | |
2407 | { | |
2408 | struct se_device *dev = to_device(item); | |
2409 | struct config_item *lu_ci; | |
2410 | struct t10_alua_lu_gp *lu_gp; | |
2411 | struct t10_alua_lu_gp_member *lu_gp_mem; | |
2412 | ssize_t len = 0; | |
2413 | ||
2414 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | |
2415 | if (!lu_gp_mem) | |
2416 | return 0; | |
2417 | ||
2418 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | |
2419 | lu_gp = lu_gp_mem->lu_gp; | |
2420 | if (lu_gp) { | |
2421 | lu_ci = &lu_gp->lu_gp_group.cg_item; | |
2422 | len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", | |
2423 | config_item_name(lu_ci), lu_gp->lu_gp_id); | |
2424 | } | |
2425 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | |
2426 | ||
2427 | return len; | |
2428 | } | |
2429 | ||
2430 | static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, | |
2431 | const char *page, size_t count) | |
2432 | { | |
2433 | struct se_device *dev = to_device(item); | |
2434 | struct se_hba *hba = dev->se_hba; | |
2435 | struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; | |
2436 | struct t10_alua_lu_gp_member *lu_gp_mem; | |
2437 | unsigned char buf[LU_GROUP_NAME_BUF] = { }; | |
2438 | int move = 0; | |
2439 | ||
2440 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | |
2441 | if (!lu_gp_mem) | |
2442 | return count; | |
2443 | ||
2444 | if (count > LU_GROUP_NAME_BUF) { | |
2445 | pr_err("ALUA LU Group Alias too large!\n"); | |
2446 | return -EINVAL; | |
2447 | } | |
2448 | memcpy(buf, page, count); | |
2449 | /* | |
2450 | * Any ALUA logical unit alias besides "NULL" means we will be | |
2451 | * making a new group association. | |
2452 | */ | |
2453 | if (strcmp(strstrip(buf), "NULL")) { | |
2454 | /* | |
2455 | * core_alua_get_lu_gp_by_name() will increment reference to | |
2456 | * struct t10_alua_lu_gp. This reference is released with | |
2457 | * core_alua_put_lu_gp_from_name() below. | |
2458 | */ | |
2459 | lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); | |
2460 | if (!lu_gp_new) | |
2461 | return -ENODEV; | |
2462 | } | |
2463 | ||
2464 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | |
2465 | lu_gp = lu_gp_mem->lu_gp; | |
2466 | if (lu_gp) { | |
2467 | /* | |
2468 | * Clearing an existing lu_gp association, and replacing | |
2469 | * with NULL | |
2470 | */ | |
2471 | if (!lu_gp_new) { | |
2472 | pr_debug("Target_Core_ConfigFS: Releasing %s/%s" | |
2473 | " from ALUA LU Group: core/alua/lu_gps/%s, ID:" | |
2474 | " %hu\n", | |
2475 | config_item_name(&hba->hba_group.cg_item), | |
2476 | config_item_name(&dev->dev_group.cg_item), | |
2477 | config_item_name(&lu_gp->lu_gp_group.cg_item), | |
2478 | lu_gp->lu_gp_id); | |
2479 | ||
2480 | __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); | |
2481 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | |
2482 | ||
2483 | return count; | |
2484 | } | |
2485 | /* | |
2486 | * Removing existing association of lu_gp_mem with lu_gp | |
2487 | */ | |
2488 | __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); | |
2489 | move = 1; | |
2490 | } | |
2491 | /* | |
2492 | * Associate lu_gp_mem with lu_gp_new. | |
2493 | */ | |
2494 | __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); | |
2495 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | |
2496 | ||
2497 | pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" | |
2498 | " core/alua/lu_gps/%s, ID: %hu\n", | |
2499 | (move) ? "Moving" : "Adding", | |
2500 | config_item_name(&hba->hba_group.cg_item), | |
2501 | config_item_name(&dev->dev_group.cg_item), | |
2502 | config_item_name(&lu_gp_new->lu_gp_group.cg_item), | |
2503 | lu_gp_new->lu_gp_id); | |
2504 | ||
2505 | core_alua_put_lu_gp_from_name(lu_gp_new); | |
2506 | return count; | |
2507 | } | |
2508 | ||
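/*
 * Example: associate the device with an existing logical unit group,
 * or write the literal string "NULL" to drop the current association
 * ($LU_GP_NAME is any group created under alua/lu_gps/):
 *
 *   echo $LU_GP_NAME > $TARGET/$HBA/$STORAGE_OBJECT/alua_lu_gp
 *   echo NULL > $TARGET/$HBA/$STORAGE_OBJECT/alua_lu_gp
 */
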
2509 | static ssize_t target_dev_lba_map_show(struct config_item *item, char *page) | |
2510 | { | |
2511 | struct se_device *dev = to_device(item); | |
2512 | struct t10_alua_lba_map *map; | |
2513 | struct t10_alua_lba_map_member *mem; | |
2514 | char *b = page; | |
2515 | int bl = 0; | |
2516 | char state; | |
2517 | ||
2518 | spin_lock(&dev->t10_alua.lba_map_lock); | |
2519 | if (!list_empty(&dev->t10_alua.lba_map_list)) | |
2520 | bl += sprintf(b + bl, "%u %u\n", | |
2521 | dev->t10_alua.lba_map_segment_size, | |
2522 | dev->t10_alua.lba_map_segment_multiplier); | |
2523 | list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { | |
2524 | bl += sprintf(b + bl, "%llu %llu", | |
2525 | map->lba_map_first_lba, map->lba_map_last_lba); | |
2526 | list_for_each_entry(mem, &map->lba_map_mem_list, | |
2527 | lba_map_mem_list) { | |
2528 | switch (mem->lba_map_mem_alua_state) { | |
2529 | case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: | |
2530 | state = 'O'; | |
2531 | break; | |
2532 | case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: | |
2533 | state = 'A'; | |
2534 | break; | |
2535 | case ALUA_ACCESS_STATE_STANDBY: | |
2536 | state = 'S'; | |
2537 | break; | |
2538 | case ALUA_ACCESS_STATE_UNAVAILABLE: | |
2539 | state = 'U'; | |
2540 | break; | |
2541 | default: | |
2542 | state = '.'; | |
2543 | break; | |
2544 | } | |
2545 | bl += sprintf(b + bl, " %d:%c", | |
2546 | mem->lba_map_mem_alua_pg_id, state); | |
2547 | } | |
2548 | bl += sprintf(b + bl, "\n"); | |
2549 | } | |
2550 | spin_unlock(&dev->t10_alua.lba_map_lock); | |
2551 | return bl; | |
2552 | } | |
2553 | ||
2554 | static ssize_t target_dev_lba_map_store(struct config_item *item, | |
2555 | const char *page, size_t count) | |
2556 | { | |
2557 | struct se_device *dev = to_device(item); | |
2558 | struct t10_alua_lba_map *lba_map = NULL; | |
2559 | struct list_head lba_list; | |
2560 | char *map_entries, *orig, *ptr; | |
2561 | char state; | |
2562 | int pg_num = -1, pg; | |
2563 | int ret = 0, num = 0, pg_id, alua_state; | |
2564 | unsigned long start_lba = -1, end_lba = -1; | |
2565 | unsigned long segment_size = -1, segment_mult = -1; | |
2566 | ||
2567 | orig = map_entries = kstrdup(page, GFP_KERNEL); | |
2568 | if (!map_entries) | |
2569 | return -ENOMEM; | |
2570 | ||
2571 | INIT_LIST_HEAD(&lba_list); | |
2572 | while ((ptr = strsep(&map_entries, "\n")) != NULL) { | |
2573 | if (!*ptr) | |
2574 | continue; | |
2575 | ||
2576 | if (num == 0) { | |
2577 | if (sscanf(ptr, "%lu %lu\n", | |
2578 | &segment_size, &segment_mult) != 2) { | |
2579 | pr_err("Invalid line %d\n", num); | |
2580 | ret = -EINVAL; | |
2581 | break; | |
2582 | } | |
2583 | num++; | |
2584 | continue; | |
2585 | } | |
2586 | if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { | |
2587 | pr_err("Invalid line %d\n", num); | |
2588 | ret = -EINVAL; | |
2589 | break; | |
2590 | } | |
2591 | ptr = strchr(ptr, ' '); | |
2592 | if (!ptr) { | |
2593 | pr_err("Invalid line %d, missing end lba\n", num); | |
2594 | ret = -EINVAL; | |
2595 | break; | |
2596 | } | |
2597 | ptr++; | |
2598 | ptr = strchr(ptr, ' '); | |
2599 | if (!ptr) { | |
2600 | pr_err("Invalid line %d, missing state definitions\n", | |
2601 | num); | |
2602 | ret = -EINVAL; | |
2603 | break; | |
2604 | } | |
2605 | ptr++; | |
2606 | lba_map = core_alua_allocate_lba_map(&lba_list, | |
2607 | start_lba, end_lba); | |
2608 | if (IS_ERR(lba_map)) { | |
2609 | ret = PTR_ERR(lba_map); | |
2610 | break; | |
2611 | } | |
2612 | pg = 0; | |
2613 | while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { | |
2614 | switch (state) { | |
2615 | case 'O': | |
2616 | alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; | |
2617 | break; | |
2618 | case 'A': | |
2619 | alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; | |
2620 | break; | |
2621 | case 'S': | |
2622 | alua_state = ALUA_ACCESS_STATE_STANDBY; | |
2623 | break; | |
2624 | case 'U': | |
2625 | alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; | |
2626 | break; | |
2627 | default: | |
2628 | pr_err("Invalid ALUA state '%c'\n", state); | |
2629 | ret = -EINVAL; | |
2630 | goto out; | |
2631 | } | |
2632 | ||
2633 | ret = core_alua_allocate_lba_map_mem(lba_map, | |
2634 | pg_id, alua_state); | |
2635 | if (ret) { | |
2636 | pr_err("Invalid target descriptor %d:%c " | |
2637 | "at line %d\n", | |
2638 | pg_id, state, num); | |
2639 | break; | |
2640 | } | |
2641 | pg++; | |
2642 | ptr = strchr(ptr, ' '); | |
2643 | if (ptr) | |
2644 | ptr++; | |
2645 | else | |
2646 | break; | |
2647 | } | |
2648 | if (pg_num == -1) | |
2649 | pg_num = pg; | |
2650 | else if (pg != pg_num) { | |
2651 | pr_err("Only %d of %d port group definitions " | |
2652 | "at line %d\n", pg, pg_num, num); | |
2653 | ret = -EINVAL; | |
2654 | break; | |
2655 | } | |
2656 | num++; | |
2657 | } | |
2658 | out: | |
2659 | if (ret) { | |
2660 | core_alua_free_lba_map(&lba_list); | |
2661 | count = ret; | |
2662 | } else | |
2663 | core_alua_set_lba_map(dev, &lba_list, | |
2664 | segment_size, segment_mult); | |
2665 | kfree(orig); | |
2666 | return count; | |
2667 | } | |
2668 | ||
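/*
 * The lba_map store accepts the same text layout the show routine
 * above emits: a "segment_size segment_multiplier" header, then one
 * "first_lba last_lba pg_id:state ..." line per segment, with every
 * line naming the same number of port groups. For example:
 *
 *   echo "64 1
 *   0 1023 0:O 1:A
 *   1024 2047 0:A 1:O" > $TARGET/$HBA/$STORAGE_OBJECT/lba_map
 *
 * maps alternating segments active/optimized ('O') on one target port
 * group and active/non-optimized ('A') on the other; 'S' and 'U'
 * select standby and unavailable.
 */
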
2669 | CONFIGFS_ATTR_RO(target_dev_, info); | |
2670 | CONFIGFS_ATTR_WO(target_dev_, control); | |
2671 | CONFIGFS_ATTR(target_dev_, alias); | |
2672 | CONFIGFS_ATTR(target_dev_, udev_path); | |
2673 | CONFIGFS_ATTR(target_dev_, enable); | |
2674 | CONFIGFS_ATTR(target_dev_, alua_lu_gp); | |
2675 | CONFIGFS_ATTR(target_dev_, lba_map); | |
2676 | ||
2677 | static struct configfs_attribute *target_core_dev_attrs[] = { | |
2678 | &target_dev_attr_info, | |
2679 | &target_dev_attr_control, | |
2680 | &target_dev_attr_alias, | |
2681 | &target_dev_attr_udev_path, | |
2682 | &target_dev_attr_enable, | |
2683 | &target_dev_attr_alua_lu_gp, | |
2684 | &target_dev_attr_lba_map, | |
2685 | NULL, | |
2686 | }; | |
2687 | ||
2688 | static void target_core_dev_release(struct config_item *item) | |
2689 | { | |
2690 | struct config_group *dev_cg = to_config_group(item); | |
2691 | struct se_device *dev = | |
2692 | container_of(dev_cg, struct se_device, dev_group); | |
2693 | ||
2694 | target_free_device(dev); | |
2695 | } | |
2696 | ||
2697 | /* | |
2698 | * Used in target_core_fabric_configfs.c to verify valid se_device symlink | |
2699 | * within target_fabric_port_link() | |
2700 | */ | |
2701 | struct configfs_item_operations target_core_dev_item_ops = { | |
2702 | .release = target_core_dev_release, | |
2703 | }; | |
2704 | ||
2705 | TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); | |
2706 | ||
2707 | /* End functions for struct config_item_type tb_dev_cit */ | |
2708 | ||
2709 | /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ | |
2710 | ||
2711 | static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item) | |
2712 | { | |
2713 | return container_of(to_config_group(item), struct t10_alua_lu_gp, | |
2714 | lu_gp_group); | |
2715 | } | |
2716 | ||
2717 | static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page) | |
2718 | { | |
2719 | struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); | |
2720 | ||
2721 | if (!lu_gp->lu_gp_valid_id) | |
2722 | return 0; | |
2723 | return sprintf(page, "%hu\n", lu_gp->lu_gp_id); | |
2724 | } | |
2725 | ||
2726 | static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, | |
2727 | const char *page, size_t count) | |
2728 | { | |
2729 | struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); | |
2730 | struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; | |
2731 | unsigned long lu_gp_id; | |
2732 | int ret; | |
2733 | ||
2734 | ret = kstrtoul(page, 0, &lu_gp_id); | |
2735 | if (ret < 0) { | |
2736 | pr_err("kstrtoul() returned %d for" | |
2737 | " lu_gp_id\n", ret); | |
2738 | return ret; | |
2739 | } | |
2740 | if (lu_gp_id > 0x0000ffff) { | |
2741 | pr_err("ALUA lu_gp_id: %lu exceeds maximum:" | |
2742 | " 0x0000ffff\n", lu_gp_id); | |
2743 | return -EINVAL; | |
2744 | } | |
2745 | ||
2746 | ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); | |
2747 | if (ret < 0) | |
2748 | return -EINVAL; | |
2749 | ||
2750 | pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" | |
2751 | " Group: core/alua/lu_gps/%s to ID: %hu\n", | |
2752 | config_item_name(&alua_lu_gp_cg->cg_item), | |
2753 | lu_gp->lu_gp_id); | |
2754 | ||
2755 | return count; | |
2756 | } | |
2757 | ||
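/*
 * Example: create a logical unit group and assign its 16-bit ID; the
 * group only reports an ID (and becomes usable) once one has been
 * written:
 *
 *   mkdir $TARGET/alua/lu_gps/my_lu_gp
 *   echo 1 > $TARGET/alua/lu_gps/my_lu_gp/lu_gp_id
 */
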
2758 | static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) | |
2759 | { | |
2760 | struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); | |
2761 | struct se_device *dev; | |
2762 | struct se_hba *hba; | |
2763 | struct t10_alua_lu_gp_member *lu_gp_mem; | |
2764 | ssize_t len = 0, cur_len; | |
2765 | unsigned char buf[LU_GROUP_NAME_BUF] = { }; | |
2766 | ||
2767 | spin_lock(&lu_gp->lu_gp_lock); | |
2768 | list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { | |
2769 | dev = lu_gp_mem->lu_gp_mem_dev; | |
2770 | hba = dev->se_hba; | |
2771 | ||
2772 | cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", | |
2773 | config_item_name(&hba->hba_group.cg_item), | |
2774 | config_item_name(&dev->dev_group.cg_item)); | |
2775 | cur_len++; /* Extra byte for NULL terminator */ | |
2776 | ||
2777 | if ((cur_len + len) > PAGE_SIZE) { | |
2778 | pr_warn("Ran out of lu_gp_show_attr" | |
2779 | "_members buffer\n"); | |
2780 | break; | |
2781 | } | |
2782 | memcpy(page+len, buf, cur_len); | |
2783 | len += cur_len; | |
2784 | } | |
2785 | spin_unlock(&lu_gp->lu_gp_lock); | |
2786 | ||
2787 | return len; | |
2788 | } | |
2789 | ||
2790 | CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); | |
2791 | CONFIGFS_ATTR_RO(target_lu_gp_, members); | |
2792 | ||
2793 | static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { | |
2794 | &target_lu_gp_attr_lu_gp_id, | |
2795 | &target_lu_gp_attr_members, | |
2796 | NULL, | |
2797 | }; | |
2798 | ||
2799 | static void target_core_alua_lu_gp_release(struct config_item *item) | |
2800 | { | |
2801 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | |
2802 | struct t10_alua_lu_gp, lu_gp_group); | |
2803 | ||
2804 | core_alua_free_lu_gp(lu_gp); | |
2805 | } | |
2806 | ||
2807 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { | |
2808 | .release = target_core_alua_lu_gp_release, | |
2809 | }; | |
2810 | ||
2811 | static const struct config_item_type target_core_alua_lu_gp_cit = { | |
2812 | .ct_item_ops = &target_core_alua_lu_gp_ops, | |
2813 | .ct_attrs = target_core_alua_lu_gp_attrs, | |
2814 | .ct_owner = THIS_MODULE, | |
2815 | }; | |
2816 | ||
2817 | /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ | |
2818 | ||
2819 | /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ | |
2820 | ||
2821 | static struct config_group *target_core_alua_create_lu_gp( | |
2822 | struct config_group *group, | |
2823 | const char *name) | |
2824 | { | |
2825 | struct t10_alua_lu_gp *lu_gp; | |
2826 | struct config_group *alua_lu_gp_cg = NULL; | |
2827 | struct config_item *alua_lu_gp_ci = NULL; | |
2828 | ||
2829 | lu_gp = core_alua_allocate_lu_gp(name, 0); | |
2830 | if (IS_ERR(lu_gp)) | |
2831 | return NULL; | |
2832 | ||
2833 | alua_lu_gp_cg = &lu_gp->lu_gp_group; | |
2834 | alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; | |
2835 | ||
2836 | config_group_init_type_name(alua_lu_gp_cg, name, | |
2837 | &target_core_alua_lu_gp_cit); | |
2838 | ||
2839 | pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" | |
2840 | " Group: core/alua/lu_gps/%s\n", | |
2841 | config_item_name(alua_lu_gp_ci)); | |
2842 | ||
2843 | return alua_lu_gp_cg; | |
2845 | } | |
2846 | ||
2847 | static void target_core_alua_drop_lu_gp( | |
2848 | struct config_group *group, | |
2849 | struct config_item *item) | |
2850 | { | |
2851 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | |
2852 | struct t10_alua_lu_gp, lu_gp_group); | |
2853 | ||
2854 | pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" | |
2855 | " Group: core/alua/lu_gps/%s, ID: %hu\n", | |
2856 | config_item_name(item), lu_gp->lu_gp_id); | |
2857 | /* | |
2858 | * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() | |
2859 | * -> target_core_alua_lu_gp_release() | |
2860 | */ | |
2861 | config_item_put(item); | |
2862 | } | |
2863 | ||
2864 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { | |
2865 | .make_group = &target_core_alua_create_lu_gp, | |
2866 | .drop_item = &target_core_alua_drop_lu_gp, | |
2867 | }; | |
2868 | ||
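/*
 * configfs routes mkdir(2) under alua/lu_gps/ to ->make_group() and
 * rmdir(2) to ->drop_item(), so logical unit group lifetime is driven
 * entirely from userspace:
 *
 *   mkdir $TARGET/alua/lu_gps/my_lu_gp
 *   rmdir $TARGET/alua/lu_gps/my_lu_gp
 */
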
2869 | static const struct config_item_type target_core_alua_lu_gps_cit = { | |
2870 | .ct_item_ops = NULL, | |
2871 | .ct_group_ops = &target_core_alua_lu_gps_group_ops, | |
2872 | .ct_owner = THIS_MODULE, | |
2873 | }; | |
2874 | ||
2875 | /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ | |
2876 | ||
2877 | /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ | |
2878 | ||
2879 | static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item) | |
2880 | { | |
2881 | return container_of(to_config_group(item), struct t10_alua_tg_pt_gp, | |
2882 | tg_pt_gp_group); | |
2883 | } | |
2884 | ||
2885 | static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, | |
2886 | char *page) | |
2887 | { | |
2888 | return sprintf(page, "%d\n", | |
2889 | to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); | |
2890 | } | |
2891 | ||
2892 | static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, | |
2893 | const char *page, size_t count) | |
2894 | { | |
2895 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
2896 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; | |
2897 | unsigned long tmp; | |
2898 | int new_state, ret; | |
2899 | ||
2900 | if (!tg_pt_gp->tg_pt_gp_valid_id) { | |
2901 | pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n"); | |
2902 | return -EINVAL; | |
2903 | } | |
2904 | if (!target_dev_configured(dev)) { | |
2905 | pr_err("Unable to set alua_access_state while device is" | |
2906 | " not configured\n"); | |
2907 | return -ENODEV; | |
2908 | } | |
2909 | ||
2910 | ret = kstrtoul(page, 0, &tmp); | |
2911 | if (ret < 0) { | |
2912 | pr_err("Unable to extract new ALUA access state from" | |
2913 | " %s\n", page); | |
2914 | return ret; | |
2915 | } | |
2916 | new_state = (int)tmp; | |
2917 | ||
2918 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { | |
2919 | pr_err("Unable to process implicit configfs ALUA" | |
2920 | " transition while TPGS_IMPLICIT_ALUA is disabled\n"); | |
2921 | return -EINVAL; | |
2922 | } | |
2923 | if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && | |
2924 | new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { | |
2925 | /* LBA DEPENDENT is only allowed with implicit ALUA */ | |
2926 | pr_err("Unable to process implicit configfs ALUA transition" | |
2927 | " while explicit ALUA management is enabled\n"); | |
2928 | return -EINVAL; | |
2929 | } | |
2930 | ||
2931 | ret = core_alua_do_port_transition(tg_pt_gp, dev, | |
2932 | NULL, NULL, new_state, 0); | |
2933 | return (!ret) ? count : -EINVAL; | |
2934 | } | |
2935 | ||
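/*
 * Example: request an implicit transition of the pre-existing default
 * group to standby. The value written is the ALUA access state code
 * (0x2 is standby; see the ALUA_ACCESS_STATE_* definitions for the
 * authoritative numbering), and TPGS_IMPLICIT_ALUA must be enabled in
 * alua_access_type first:
 *
 *   echo 0x2 > $TARGET/$HBA/$STORAGE_OBJECT/alua/default_tg_pt_gp/alua_access_state
 */
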
2936 | static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item, | |
2937 | char *page) | |
2938 | { | |
2939 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
2940 | return sprintf(page, "%s\n", | |
2941 | core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); | |
2942 | } | |
2943 | ||
2944 | static ssize_t target_tg_pt_gp_alua_access_status_store( | |
2945 | struct config_item *item, const char *page, size_t count) | |
2946 | { | |
2947 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
2948 | unsigned long tmp; | |
2949 | int new_status, ret; | |
2950 | ||
2951 | if (!tg_pt_gp->tg_pt_gp_valid_id) { | |
2952 | pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n"); | |
2953 | return -EINVAL; | |
2954 | } | |
2955 | ||
2956 | ret = kstrtoul(page, 0, &tmp); | |
2957 | if (ret < 0) { | |
2958 | pr_err("Unable to extract new ALUA access status" | |
2959 | " from %s\n", page); | |
2960 | return ret; | |
2961 | } | |
2962 | new_status = (int)tmp; | |
2963 | ||
2964 | if ((new_status != ALUA_STATUS_NONE) && | |
2965 | (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && | |
2966 | (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { | |
2967 | pr_err("Illegal ALUA access status: 0x%02x\n", | |
2968 | new_status); | |
2969 | return -EINVAL; | |
2970 | } | |
2971 | ||
2972 | tg_pt_gp->tg_pt_gp_alua_access_status = new_status; | |
2973 | return count; | |
2974 | } | |
2975 | ||
2976 | static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item, | |
2977 | char *page) | |
2978 | { | |
2979 | return core_alua_show_access_type(to_tg_pt_gp(item), page); | |
2980 | } | |
2981 | ||
2982 | static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item, | |
2983 | const char *page, size_t count) | |
2984 | { | |
2985 | return core_alua_store_access_type(to_tg_pt_gp(item), page, count); | |
2986 | } | |
2987 | ||
2988 | #define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \ | |
2989 | static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \ | |
2990 | struct config_item *item, char *p) \ | |
2991 | { \ | |
2992 | struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ | |
2993 | return sprintf(p, "%d\n", \ | |
2994 | !!(t->tg_pt_gp_alua_supported_states & _bit)); \ | |
2995 | } \ | |
2996 | \ | |
2997 | static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \ | |
2998 | struct config_item *item, const char *p, size_t c) \ | |
2999 | { \ | |
3000 | struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ | |
3001 | unsigned long tmp; \ | |
3002 | int ret; \ | |
3003 | \ | |
3004 | if (!t->tg_pt_gp_valid_id) { \ | |
3005 | pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \ | |
3006 | return -EINVAL; \ | |
3007 | } \ | |
3008 | \ | |
3009 | ret = kstrtoul(p, 0, &tmp); \ | |
3010 | if (ret < 0) { \ | |
3011 | pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ | |
3012 | return -EINVAL; \ | |
3013 | } \ | |
3014 | if (tmp > 1) { \ | |
3015 | pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ | |
3016 | return -EINVAL; \ | |
3017 | } \ | |
3018 | if (tmp) \ | |
3019 | t->tg_pt_gp_alua_supported_states |= _bit; \ | |
3020 | else \ | |
3021 | t->tg_pt_gp_alua_supported_states &= ~_bit; \ | |
3022 | \ | |
3023 | return c; \ | |
3024 | } | |
3025 | ||
3026 | ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP); | |
3027 | ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP); | |
3028 | ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP); | |
3029 | ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP); | |
3030 | ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP); | |
3031 | ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP); | |
3032 | ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP); | |
3033 | ||
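/*
 * Each ALUA_SUPPORTED_STATE_ATTR() above generates one 0/1 attribute
 * that toggles a single bit of tg_pt_gp_alua_supported_states, which
 * is what REPORT TARGET PORT GROUPS advertises for the group, e.g.:
 *
 *   echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/alua/default_tg_pt_gp/alua_support_standby
 */
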
3034 | static ssize_t target_tg_pt_gp_alua_write_metadata_show( | |
3035 | struct config_item *item, char *page) | |
3036 | { | |
3037 | return sprintf(page, "%d\n", | |
3038 | to_tg_pt_gp(item)->tg_pt_gp_write_metadata); | |
3039 | } | |
3040 | ||
3041 | static ssize_t target_tg_pt_gp_alua_write_metadata_store( | |
3042 | struct config_item *item, const char *page, size_t count) | |
3043 | { | |
3044 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
3045 | unsigned long tmp; | |
3046 | int ret; | |
3047 | ||
3048 | ret = kstrtoul(page, 0, &tmp); | |
3049 | if (ret < 0) { | |
3050 | pr_err("Unable to extract alua_write_metadata\n"); | |
3051 | return ret; | |
3052 | } | |
3053 | ||
3054 | if ((tmp != 0) && (tmp != 1)) { | |
3055 | pr_err("Illegal value for alua_write_metadata:" | |
3056 | " %lu\n", tmp); | |
3057 | return -EINVAL; | |
3058 | } | |
3059 | tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; | |
3060 | ||
3061 | return count; | |
3062 | } | |
3063 | ||
3064 | static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item, | |
3065 | char *page) | |
3066 | { | |
3067 | return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page); | |
3068 | } | |
3069 | ||
3070 | static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item, | |
3071 | const char *page, size_t count) | |
3072 | { | |
3073 | return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page, | |
3074 | count); | |
3075 | } | |
3076 | ||
3077 | static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item, | |
3078 | char *page) | |
3079 | { | |
3080 | return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page); | |
3081 | } | |
3082 | ||
3083 | static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item, | |
3084 | const char *page, size_t count) | |
3085 | { | |
3086 | return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page, | |
3087 | count); | |
3088 | } | |
3089 | ||
3090 | static ssize_t target_tg_pt_gp_implicit_trans_secs_show( | |
3091 | struct config_item *item, char *page) | |
3092 | { | |
3093 | return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page); | |
3094 | } | |
3095 | ||
3096 | static ssize_t target_tg_pt_gp_implicit_trans_secs_store( | |
3097 | struct config_item *item, const char *page, size_t count) | |
3098 | { | |
3099 | return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page, | |
3100 | count); | |
3101 | } | |
3102 | ||
3103 | static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item, | |
3104 | char *page) | |
3105 | { | |
3106 | return core_alua_show_preferred_bit(to_tg_pt_gp(item), page); | |
3107 | } | |
3108 | ||
3109 | static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item, | |
3110 | const char *page, size_t count) | |
3111 | { | |
3112 | return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count); | |
3113 | } | |
3114 | ||
3115 | static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item, | |
3116 | char *page) | |
3117 | { | |
3118 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
3119 | ||
3120 | if (!tg_pt_gp->tg_pt_gp_valid_id) | |
3121 | return 0; | |
3122 | return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); | |
3123 | } | |
3124 | ||
3125 | static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item, | |
3126 | const char *page, size_t count) | |
3127 | { | |
3128 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
3129 | struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; | |
3130 | unsigned long tg_pt_gp_id; | |
3131 | int ret; | |
3132 | ||
3133 | ret = kstrtoul(page, 0, &tg_pt_gp_id); | |
3134 | if (ret < 0) { | |
3135 | pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n", | |
3136 | page); | |
3137 | return ret; | |
3138 | } | |
3139 | if (tg_pt_gp_id > 0x0000ffff) { | |
3140 | pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n", | |
3141 | tg_pt_gp_id); | |
3142 | return -EINVAL; | |
3143 | } | |
3144 | ||
3145 | ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); | |
3146 | if (ret < 0) | |
3147 | return -EINVAL; | |
3148 | ||
3149 | pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " | |
3150 | "core/alua/tg_pt_gps/%s to ID: %hu\n", | |
3151 | config_item_name(&alua_tg_pt_gp_cg->cg_item), | |
3152 | tg_pt_gp->tg_pt_gp_id); | |
3153 | ||
3154 | return count; | |
3155 | } | |
3156 | ||
3157 | static ssize_t target_tg_pt_gp_members_show(struct config_item *item, | |
3158 | char *page) | |
3159 | { | |
3160 | struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); | |
3161 | struct se_lun *lun; | |
3162 | ssize_t len = 0, cur_len; | |
3163 | unsigned char buf[TG_PT_GROUP_NAME_BUF] = { }; | |
3164 | ||
3165 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | |
3166 | list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, | |
3167 | lun_tg_pt_gp_link) { | |
3168 | struct se_portal_group *tpg = lun->lun_tpg; | |
3169 | ||
3170 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" | |
3171 | "/%s\n", tpg->se_tpg_tfo->fabric_name, | |
3172 | tpg->se_tpg_tfo->tpg_get_wwn(tpg), | |
3173 | tpg->se_tpg_tfo->tpg_get_tag(tpg), | |
3174 | config_item_name(&lun->lun_group.cg_item)); | |
3175 | cur_len++; /* Extra byte for NULL terminator */ | |
3176 | ||
3177 | if ((cur_len + len) > PAGE_SIZE) { | |
3178 | pr_warn("Ran out of tg_pt_gp_show_attr" | |
3179 | "_members buffer\n"); | |
3180 | break; | |
3181 | } | |
3182 | memcpy(page+len, buf, cur_len); | |
3183 | len += cur_len; | |
3184 | } | |
3185 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | |
3186 | ||
3187 | return len; | |
3188 | } | |

CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);

static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};

static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release = target_core_alua_tg_pt_gp_release,
};

static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
	.ct_attrs = target_core_alua_tg_pt_gp_attrs,
	.ct_owner = THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}
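
/*
 * target_core_alua_create_tg_pt_gp() runs when userspace creates a
 * directory under a device's alua/ group; for example (illustrative
 * path):
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/tpg_a
 */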

static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group = &target_core_alua_create_tg_pt_gp,
	.drop_item = &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */

/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. The default group
 * core/alua/lu_gps is attached to target_core_alua_cit in
 * target_core_init_configfs() below.
 */
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops = NULL,
	.ct_attrs = NULL,
	.ct_owner = THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */

/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
}
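
/*
 * The statistics/ directory is populated by the kernel itself (see
 * target_stat_setup_dev_default_groups() in target_core_make_subdev()
 * below); the mkdir stub above rejects userspace directory creation
 * with -ENOSYS, and the rmdir stub intentionally does nothing.
 */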

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group = &target_core_stat_mkdir,
	.drop_item = &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */

/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
		struct config_group *group,
		const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_lock(&target_devices_lock);
	target_devices++;
	mutex_unlock(&target_devices_lock);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
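
/*
 * A successful mkdir of core/$HBA/$DEV therefore yields the following
 * default layout (directory names taken from the code above):
 *
 *   $DEV/action/
 *   $DEV/attrib/
 *   $DEV/pr/
 *   $DEV/wwn/
 *   $DEV/alua/default_tg_pt_gp/
 *   $DEV/statistics/
 */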

static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * core_alua_free_tg_pt_gp() is called for ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release(), so only
	 * clear the pointer here.
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);

	mutex_lock(&target_devices_lock);
	target_devices--;
	mutex_unlock(&target_devices_lock);

	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group = target_core_make_subdev,
	.drop_item = target_core_drop_subdev,
};

static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}

static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}

static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (!hba->backend->ops->pmode_enable_hba)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
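
/*
 * Illustrative usage: passthrough mode can only be toggled on backends
 * that implement ->pmode_enable_hba(), and only while the HBA has no
 * configured devices, e.g.:
 *
 *   echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
 */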

CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release = target_core_hba_release,
};

static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops = &target_core_hba_item_ops,
	.ct_group_ops = &target_core_hba_group_ops,
	.ct_attrs = target_core_hba_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN] = { };
	unsigned long plugin_dep_id = 0;
	int ret;

	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names,
	 * namely rd_direct and rd_mcp.
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
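
/*
 * Example: "mkdir /sys/kernel/config/target/core/iblock_0" parses to
 * plugin "iblock" with ID 0, while "mkdir .../core/rd_mcp_3" takes the
 * rd_mcp special case above and parses to plugin "rd_mcp" with ID 3.
 */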

static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group = target_core_call_addhbatotarget,
	.drop_item = target_core_call_delhbafromtarget,
};

static const struct config_item_type target_core_cit = {
	.ct_item_ops = NULL,
	.ct_group_ops = &target_core_group_ops,
	.ct_attrs = NULL,
	.ct_owner = THIS_MODULE,
};

/* End functions for struct config_item_type target_core_hba_cit */

void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}

static void target_init_dbroot(void)
{
	struct file *fp;

	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
		return;
	}
	filp_close(fp, NULL);

	strscpy(db_root, db_root_stage);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
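
/*
 * db_root selects where PR/ALUA metadata files are written. The
 * preferred location (DB_ROOT_PREFERRED, typically /etc/target) is
 * only adopted when it already exists as a directory; otherwise the
 * compiled-in default is kept.
 */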

static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	struct cred *kern_cred;
	const struct cred *old_cred;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	/* We use the kernel credentials to access the target directory */
	kern_cred = prepare_kernel_cred(&init_task);
	if (!kern_cred) {
		ret = -ENOMEM;
		goto out;
	}
	old_cred = override_creds(kern_cred);
	target_init_dbroot();
	revert_creds(old_cred);
	put_cred(kern_cred);

	return 0;

out:
	target_xcopy_release_pt();
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
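
/*
 * After module load the subsystem therefore presents (assuming configfs
 * is mounted at /sys/kernel/config):
 *
 *   /sys/kernel/config/target/core/
 *   /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp/
 */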

static void __exit target_core_exit_configfs(void)
{
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic.
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
		" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}

MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);