/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct iblock_hba *ib_host;

        ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
        if (!(ib_host)) {
                printk(KERN_ERR "Unable to allocate memory for"
                                " struct iblock_hba\n");
                return -ENOMEM;
        }

        ib_host->iblock_host_id = host_id;

        atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
        atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
        hba->hba_ptr = (void *) ib_host;

        printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

        printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
                " Target Core TCQ Depth: %d\n", hba->hba_id,
                ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));

        return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
        struct iblock_hba *ib_host = hba->hba_ptr;

        printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
                " Target Core\n", hba->hba_id, ib_host->iblock_host_id);

        kfree(ib_host);
        hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;
        struct iblock_hba *ib_host = hba->hba_ptr;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!(ib_dev)) {
                printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
                return NULL;
        }
        ib_dev->ibd_host = ib_host;

        printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);

        return ib_dev;
}
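/*
 * Note: configfs device setup happens in two phases. iblock_allocate_virtdevice()
 * above only allocates the shadow struct iblock_dev when the configfs group is
 * created; iblock_create_virtdevice() below claims the backing struct
 * block_device once udev_path= has been set and the device is enabled.
 */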
static struct se_device *iblock_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        struct iblock_dev *ib_dev = p;
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct block_device *bd = NULL;
        struct request_queue *q;
        struct queue_limits *limits;
        u32 dev_flags = 0;
        int ret = -EINVAL;

        if (!(ib_dev)) {
                printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
                return ERR_PTR(ret);
        }
        memset(&dev_limits, 0, sizeof(struct se_dev_limits));
        /*
         * These settings need to be made tunable..
         */
        ib_dev->ibd_bio_set = bioset_create(32, 64);
        if (!(ib_dev->ibd_bio_set)) {
                printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
                return ERR_PTR(-ENOMEM);
        }
        printk(KERN_INFO "IBLOCK: Created bio_set()\n");
        /*
         * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
         * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
         */
        printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto failed;
        }
        /*
         * Setup the local scope queue_limits from struct request_queue->limits
         * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
         */
        q = bdev_get_queue(bd);
        limits = &dev_limits.limits;
        limits->logical_block_size = bdev_logical_block_size(bd);
        limits->max_hw_sectors = queue_max_hw_sectors(q);
        limits->max_sectors = queue_max_sectors(q);
        dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
        dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;

        ib_dev->ibd_major = MAJOR(bd->bd_dev);
        ib_dev->ibd_minor = MINOR(bd->bd_dev);
        ib_dev->ibd_bd = bd;

        dev = transport_add_device_to_core_hba(hba,
                        &iblock_template, se_dev, dev_flags, (void *)ib_dev,
                        &dev_limits, "IBLOCK", IBLOCK_VERSION);
        if (!(dev))
                goto failed;

        ib_dev->ibd_depth = dev->queue_depth;

        /*
         * Check if the underlying struct block_device request_queue supports
         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
         * in ATA and we need to set TPE=1
         */
        if (blk_queue_discard(bdev_get_queue(bd))) {
                struct request_queue *q = bdev_get_queue(bd);

                DEV_ATTRIB(dev)->max_unmap_lba_count =
                                q->limits.max_discard_sectors;
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
                DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
                DEV_ATTRIB(dev)->unmap_granularity =
                                q->limits.discard_granularity;
                DEV_ATTRIB(dev)->unmap_granularity_alignment =
                                q->limits.discard_alignment;

                printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }

        return dev;

failed:
        if (ib_dev->ibd_bio_set) {
                bioset_free(ib_dev->ibd_bio_set);
                ib_dev->ibd_bio_set = NULL;
        }
        ib_dev->ibd_bd = NULL;
        ib_dev->ibd_major = 0;
        ib_dev->ibd_minor = 0;
        return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
        struct iblock_dev *ib_dev = p;

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
}
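/*
 * struct iblock_req embeds the generic struct se_task as its ib_task member,
 * so container_of() recovers the IBLOCK request from the task pointer that
 * TCM core passes back into this plugin.
 */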
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
        return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
{
        struct iblock_req *ib_req;

        ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!(ib_req)) {
                printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
                return NULL;
        }

        ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
        atomic_set(&ib_req->ib_bio_cnt, 0);
        return &ib_req->ib_task;
}
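/*
 * READ CAPACITY emulation: derive the number of exported logical blocks from
 * the size of the backing device. When the exported SCSI block size differs
 * from the device's native block size, both are powers of two (512..4096),
 * so the conversion below reduces to left/right shifts.
 */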
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == DEV_ATTRIB(dev)->block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (DEV_ATTRIB(dev)->block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                default:
                        break;
                }
                break;
        case 2048:
                switch (DEV_ATTRIB(dev)->block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (DEV_ATTRIB(dev)->block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (DEV_ATTRIB(dev)->block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}
/*
 * Emulate SYNCHRONIZE_CACHE_*
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
        struct se_cmd *cmd = TASK_CMD(task);
        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
        int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
        sector_t error_sector;
        int ret;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op
         */
        if (immed)
                transport_complete_sync_cache(cmd, 1);

        /*
         * blkdev_issue_flush() does not support specifying a range, so
         * we have to flush the entire cache.
         */
        ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
        if (ret)
                printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
                        " error_sector: %llu\n", ret,
                        (unsigned long long)error_sector);

        if (!immed)
                transport_complete_sync_cache(cmd, ret == 0);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
        return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK Direct Access media.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
        return 0;
}
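/*
 * iblock_do_task(): submit the bio chain previously built up by
 * iblock_map_task_SG(). Submission is asynchronous; completion status is
 * reported per-bio through iblock_bio_done() below.
 */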
static int iblock_do_task(struct se_task *task)
{
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct iblock_req *req = IBLOCK_REQ(task);
        struct bio *bio = req->ib_bio, *nbio = NULL;
        struct blk_plug plug;
        int rw;

        if (task->task_data_direction == DMA_TO_DEVICE) {
                /*
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
                if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
                    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
                     T_TASK(task->task_se_cmd)->t_tasks_fua))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
        } else {
                rw = READ;
        }

        blk_start_plug(&plug);
        while (bio) {
                nbio = bio->bi_next;
                bio->bi_next = NULL;
                DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
                        " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);

                submit_bio(rw, bio);
                bio = nbio;
        }
        blk_finish_plug(&plug);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
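/*
 * Back SCSI UNMAP by passing the LBA range straight through to the block
 * layer discard path.
 */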
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        int barrier = 0;

        return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
        struct iblock_req *req = IBLOCK_REQ(task);
        struct bio *bio, *hbio = req->ib_bio;
        /*
         * We only release the bio(s) here if iblock_bio_done() has not called
         * bio_put() -> iblock_bio_destructor().
         */
        while (hbio != NULL) {
                bio = hbio;
                hbio = hbio->bi_next;
                bio->bi_next = NULL;
                bio_put(bio);
        }

        kfree(req);
}
enum {
        Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};
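/*
 * Parse the comma-separated configfs control string (e.g.
 * "udev_path=/dev/sdb,force=1") using the linux/parser.h match_token()
 * helpers and the token table above.
 */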
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
                                               struct se_subsystem_dev *se_dev,
                                               const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!(opts))
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                printk(KERN_ERR "Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
                                "%s", match_strdup(&args[0]));
                        printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_force:
                        match_int(args, &arg);
                        ib_dev->ibd_force = arg;
                        printk(KERN_INFO "IBLOCK: Set force=%d\n",
                                ib_dev->ibd_force);
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

        if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        return 0;
}
static ssize_t iblock_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
        struct block_device *bd = ibd->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
                bl += sprintf(b + bl, "  UDEV PATH: %s\n",
                                ibd->ibd_udev_path);
        } else
                bl += sprintf(b + bl, "\n");

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
                        "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: %d Minor: %d\n",
                        ibd->ibd_major, ibd->ibd_minor);
        }

        return bl;
}
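/*
 * bios for an IBLOCK device are allocated out of the per-device
 * ib_dev->ibd_bio_set, so a custom destructor is needed to return them to
 * that bioset rather than to the global bio pool.
 */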
static void iblock_bio_destructor(struct bio *bio)
{
        struct se_task *task = bio->bi_private;
        struct iblock_dev *ib_dev = task->se_dev->dev_ptr;

        bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *iblock_get_bio(
        struct se_task *task,
        struct iblock_req *ib_req,
        struct iblock_dev *ib_dev,
        int *ret,
        sector_t lba,
        u32 sg_num)
{
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!(bio)) {
                printk(KERN_ERR "Unable to allocate memory for bio\n");
                *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
                return NULL;
        }

        DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
                " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
        DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = (void *) task;
        bio->bi_destructor = iblock_bio_destructor;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        atomic_inc(&ib_req->ib_bio_cnt);

        DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
        DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
                        atomic_read(&ib_req->ib_bio_cnt));
        return bio;
}
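/*
 * Map the task scatterlist into one or more bios. When bio_add_page() cannot
 * accept a full scatterlist entry (the request_queue limits have been hit),
 * a new bio is allocated, linked onto the chain via bi_next, and filling
 * resumes at the same entry.
 */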
static int iblock_map_task_SG(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = SE_DEV(cmd);
        struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
        struct iblock_req *ib_req = IBLOCK_REQ(task);
        struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
        struct scatterlist *sg;
        int ret = 0;
        u32 i, sg_num = task->task_sg_num;
        sector_t block_lba;
        /*
         * Do starting conversion up from non 512-byte blocksize with
         * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
         */
        if (DEV_ATTRIB(dev)->block_size == 4096)
                block_lba = (task->task_lba << 3);
        else if (DEV_ATTRIB(dev)->block_size == 2048)
                block_lba = (task->task_lba << 2);
        else if (DEV_ATTRIB(dev)->block_size == 1024)
                block_lba = (task->task_lba << 1);
        else if (DEV_ATTRIB(dev)->block_size == 512)
                block_lba = task->task_lba;
        else {
                printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", DEV_ATTRIB(dev)->block_size);
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

        bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
        if (!(bio))
                return ret;

        ib_req->ib_bio = bio;
        hbio = tbio = bio;
        /*
         * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
         * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
         */
        for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
                DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
                        " %p len: %u offset: %u\n", task, bio, sg_page(sg),
                                sg->length, sg->offset);
again:
                ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
                if (ret != sg->length) {

                        DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
                                        bio->bi_sector);
                        DEBUG_IBLOCK("** task->task_size: %u\n",
                                        task->task_size);
                        DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
                                        bio->bi_max_vecs);
                        DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
                                        bio->bi_vcnt);

                        bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
                                                block_lba, sg_num);
                        if (!(bio))
                                goto fail;

                        tbio = tbio->bi_next = bio;
                        DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
                                " list, Going to again\n", bio);
                        goto again;
                }
                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
                DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented"
                        " sg_num to %u\n", task, sg_num);
                DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
                        " to %llu\n", task, block_lba);
                DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
                        " %u\n", task, bio->bi_vcnt);
        }

        return 0;
fail:
        while (hbio) {
                bio = hbio;
                hbio = hbio->bi_next;
                bio->bi_next = NULL;
                bio_put(bio);
        }
        return ret;
}
static unsigned char *iblock_get_cdb(struct se_task *task)
{
        return IBLOCK_REQ(task)->ib_scsi_cdb;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
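/*
 * Per-bio completion callback: ib_bio_cnt counts the bios still outstanding
 * for a task and ib_bio_err_cnt accumulates failures, so only the completion
 * of the final bio calls transport_complete_task() with the aggregate status.
 */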
static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_task *task = bio->bi_private;
        struct iblock_req *ibr = IBLOCK_REQ(task);
        /*
         * Set -EIO if !BIO_UPTODATE and the passed err is still 0
         */
        if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
                err = -EIO;

        if (err != 0) {
                printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic_inc();
                bio_put(bio);
                /*
                 * Wait to complete the task until the last bio has completed.
                 */
                if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
                        return;

                ibr->ib_bio = NULL;
                transport_complete_task(task, 0);
                return;
        }
        DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
                task, bio, task->task_lba, bio->bi_sector, err);
        /*
         * bio_put() will call iblock_bio_destructor() to release the bio back
         * to ibr->ib_bio_set.
         */
        bio_put(bio);
        /*
         * Wait to complete the task until the last bio has completed.
         */
        if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
                return;
        /*
         * Return GOOD status for task if zero ib_bio_err_cnt exists.
         */
        ibr->ib_bio = NULL;
        transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
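/*
 * Subsystem API template wiring the IBLOCK entry points into TCM core.
 */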
static struct se_subsystem_api iblock_template = {
        .name			= "iblock",
        .owner			= THIS_MODULE,
        .transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
        .map_task_SG		= iblock_map_task_SG,
        .attach_hba		= iblock_attach_hba,
        .detach_hba		= iblock_detach_hba,
        .allocate_virtdevice	= iblock_allocate_virtdevice,
        .create_virtdevice	= iblock_create_virtdevice,
        .free_device		= iblock_free_device,
        .dpo_emulated		= iblock_emulated_dpo,
        .fua_write_emulated	= iblock_emulated_fua_write,
        .fua_read_emulated	= iblock_emulated_fua_read,
        .write_cache_emulated	= iblock_emulated_write_cache,
        .alloc_task		= iblock_alloc_task,
        .do_task		= iblock_do_task,
        .do_discard		= iblock_do_discard,
        .do_sync_cache		= iblock_emulate_sync_cache,
        .free_task		= iblock_free_task,
        .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_cdb		= iblock_get_cdb,
        .get_device_rev		= iblock_get_device_rev,
        .get_device_type	= iblock_get_device_type,
        .get_blocks		= iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
        return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);