/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128
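
/*
 * IBLOCK_BIO_POOL_SIZE sizes the per-device bioset so bio allocation can
 * make forward progress under memory pressure; IBLOCK_MAX_BIO_PER_TASK
 * bounds how many bios are built up before iblock_submit_bios() flushes
 * them to the block layer in one plugged batch.
 */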

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static struct se_subsystem_api iblock_template;

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	/*
	 * The exported block_size differs from the backing device's
	 * logical block size.  Both are powers of two, so rescale the
	 * last-LBA count with a shift.
	 */
	if (block_size > dev->dev_attrib.block_size)
		blocks_long <<= ilog2(block_size / dev->dev_attrib.block_size);
	else
		blocks_long >>= ilog2(dev->dev_attrib.block_size / block_size);

	return blocks_long;
}
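
/*
 * Worked example for the rescaling above: a 1 TiB backing device with
 * 512-byte logical blocks yields blocks_long = 2^31 - 1.  Exported with
 * a 4096-byte block_size it becomes (2^31 - 1) >> 3 = 2^28 - 1, which
 * matches 2^40 / 4096 - 1.
 */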

static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}
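
/*
 * Completion scheme: the submit paths prime ibr->pending before any bio
 * can complete, and every additional bio takes another reference via
 * atomic_inc().  iblock_bio_done() below drops one reference per bio, so
 * target_complete_cmd() runs exactly once, after the last completion.
 */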

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still zero.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}
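
/*
 * Note: BIO_MAX_PAGES is 256 in this kernel, so a single bio covers at
 * most 1 MiB of 4 KiB pages; larger S/G lists are handled by allocating
 * follow-up bios in the submit loops further down.
 */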

static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}
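
/*
 * The blk_start_plug()/blk_finish_plug() pair batches the popped bios on
 * the current task's plug list, letting the block layer merge adjacent
 * requests before they are dispatched to the driver on unplug.
 */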

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}
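
/*
 * Note on the IMMED handling above: IMMED is bit 1 of CDB byte 1 in
 * SYNCHRONIZE CACHE (10), opcode 0x35.  When it is set, GOOD status has
 * already been queued, so bi_private is left NULL and the flush
 * completion must not touch the command a second time.
 */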

static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
		sector_t lba, sector_t nolb)
{
	struct block_device *bdev = priv;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;

	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= sg->length >> IBLOCK_LBA_SHIFT;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
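
/*
 * iblock_execute_write_same() above emulates WRITE SAME without unmap by
 * queueing the single-block payload once per target block; a short
 * bio_add_page() return is the signal to start a fresh bio at the
 * current LBA and keep going.
 */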

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (!bip) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return -ENOMEM;
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}
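
/*
 * The bip size computed above is (number of logical blocks) *
 * dev->prot_length; for T10 DIF each protection information tuple is
 * 8 bytes (2-byte guard tag, 2-byte application tag, 4-byte reference
 * tag).
 */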

static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
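
/*
 * In iblock_execute_rw() above, ibr->pending starts at 2: one reference
 * for the first bio and one held by the submitter, dropped by the final
 * iblock_complete_cmd() call once every bio has been queued.  The extra
 * reference keeps the command from completing while bios are still
 * being built.
 */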

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}
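
/*
 * Example: a 512e drive reports 4096-byte physical / 512-byte logical
 * sectors, so logs_per_phys = 8 and the exported "logical blocks per
 * physical block exponent" (lbppbe) is ilog2(8) = 3.
 */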

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}
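
/*
 * REQ_FLUSH in q->flush_flags means the backing queue advertises a
 * volatile write cache; REQ_FUA means it honors Force Unit Access
 * writes.  iblock_execute_rw() consults both to choose between WRITE
 * and WRITE_FUA.
 */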

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void __exit iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);