// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>

#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

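/*
 * transport_lookup_cmd_lun() - map se_cmd->orig_fe_lun to an active se_lun
 * for this I_T nexus, account per-deve and per-device statistics, and take
 * a percpu se_lun->lun_ref (flagged via lun_ref_active so the command
 * release path can drop it). Falls back to the TPG's virtual LUN 0 when no
 * MappedLUN=0 exists for the initiator.
 */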
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

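/*
 * transport_lookup_tmr_lun() - TMR counterpart of transport_lookup_cmd_lun():
 * resolves the mapped se_lun, takes the percpu lun_ref, and links the
 * se_tmr_req onto the device's dev_tmr_list under se_tmr_lock.
 */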
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

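/*
 * target_lun_is_rdonly() - return true when the deve backing the command's
 * fabric LUN is marked read-only for this initiator; looked up under RCU.
 */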
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

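/*
 * core_free_device_list_for_node() - walk nacl->lun_entry_hlist and disable
 * every remaining se_dev_entry mapping under nacl->lun_entry_mutex.
 */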
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

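/*
 * core_update_device_list_access() - update the RO/RW state of an existing
 * MappedLUN entry under nacl->lun_entry_mutex.
 */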
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl,
					   u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

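/*
 * target_pr_kref_release() - release callback for deve->pr_kref; completes
 * deve->pr_comp so waiters in the disable/convert paths can make progress.
 */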
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

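/*
 * target_luns_data_has_changed() - raise a 0x3F/REPORTED LUNS DATA HAS
 * CHANGED unit attention on each of the NodeACL's entries, optionally
 * skipping the entry that triggered the change.
 */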
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

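/*
 * core_enable_device_list_for_node() - install a se_dev_entry for a
 * MappedLUN. When an existing demo-mode entry is found, it is replaced
 * in-place (dynamic -> explicit conversion) and only freed via RCU once
 * any outstanding PR references have dropped their pr_kref.
 */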
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
			       " %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

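/*
 * core_disable_device_list_for_node() - reverse of the enable path above;
 * see the block comment inside for the lun_deve_list ordering requirement.
 */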
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

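/*
 * se_release_vpd_for_dev() - free all t10_vpd descriptors cached on the
 * device's T10 WWN.
 */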
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

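/*
 * core_dev_add_lun() - activate a LUN within a TPG and, in demo mode, map
 * it into every dynamically generated NodeACL.
 */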
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

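/*
 * core_dev_init_initiator_node_lun_acl() - allocate a se_lun_acl for an
 * explicit MappedLUN; on failure *ret carries the errno and NULL is
 * returned.
 */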
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

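/*
 * core_dev_add_initiator_node_lun_acl() - bind a se_lun_acl to its LUN,
 * enable the MappedLUN entry, and re-check APTPL PR pre-registrations that
 * may apply to the new mapping.
 */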
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

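/*
 * core_dev_del_initiator_node_lun_acl() - disable the MappedLUN entry
 * behind a se_lun_acl; the se_lun_acl itself is released separately via
 * core_dev_free_initiator_node_lun_acl().
 */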
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

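/*
 * target_alloc_device() - allocate a se_device through the backend's
 * alloc_device() hook and pre-initialize per-CPU submission queues, list
 * heads and locks, default device attributes, the internal xcopy LUN, and
 * the default INQUIRY identification strings.
 */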
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

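/*
 * Carries the callback passed to target_for_each_device() through
 * idr_for_each() as its opaque data pointer.
 */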
struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}

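/*
 * Illustrative target_for_each_device() usage (hypothetical, not part of
 * this file): count configured devices, stopping early at an assumed
 * limit of 16.
 *
 *	static int count_dev(struct se_device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		return ++(*count) >= 16 ? 1 : 0;
 *	}
 *
 *	int count = 0;
 *	target_for_each_device(count_dev, &count);
 */

/*
 * target_configure_device() - transition a device from allocated to
 * configured: reserve an idr slot, invoke the backend's configure_device(),
 * derive block_size/queue_depth from the hardware values, set up ALUA and
 * the QUEUE_FULL worker, then mark the device DF_CONFIGURED.
 */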
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

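/*
 * target_free_device() - undo target_alloc_device() and, when needed,
 * target_configure_device(): drop the idr slot, release ALUA/PR state and
 * cached VPD descriptors, and hand the se_device back to the backend.
 */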
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

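/*
 * core_dev_setup_virtual_lun0() - create the global rd_mcp-backed dummy
 * device (g_lun0_dev) used as virtual LUN 0 so initiators without a
 * MappedLUN=0 still receive REPORT LUNS data.
 */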
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);