/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct iblock_hba *ib_host;

        ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
        if (!ib_host) {
                pr_err("Unable to allocate memory for"
                                " struct iblock_hba\n");
                return -ENOMEM;
        }

        ib_host->iblock_host_id = host_id;

        hba->hba_ptr = ib_host;

        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

        pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
                hba->hba_id, ib_host->iblock_host_id);

        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
        struct iblock_hba *ib_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
                " Target Core\n", hba->hba_id, ib_host->iblock_host_id);

        kfree(ib_host);
        hba->hba_ptr = NULL;
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;
        struct iblock_hba *ib_host = hba->hba_ptr;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }
        ib_dev->ibd_host = ib_host;

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        struct iblock_dev *ib_dev = p;
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct block_device *bd = NULL;
        struct request_queue *q;
        struct queue_limits *limits;
        u32 dev_flags = 0;
        int ret = -EINVAL;

        if (!ib_dev) {
                pr_err("Unable to locate struct iblock_dev parameter\n");
                return ERR_PTR(ret);
        }
        memset(&dev_limits, 0, sizeof(struct se_dev_limits));
        /*
         * These settings need to be made tunable..
         */
        ib_dev->ibd_bio_set = bioset_create(32, 64);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset()\n");
                return ERR_PTR(-ENOMEM);
        }
        pr_debug("IBLOCK: Created bio_set()\n");
        /*
         * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
         * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
         */
        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                        FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto failed;
        }
        /*
         * Setup the local scope queue_limits from struct request_queue->limits
         * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
         */
        q = bdev_get_queue(bd);
        limits = &dev_limits.limits;
        limits->logical_block_size = bdev_logical_block_size(bd);
        limits->max_hw_sectors = queue_max_hw_sectors(q);
        limits->max_sectors = queue_max_sectors(q);
        dev_limits.hw_queue_depth = q->nr_requests;
        dev_limits.queue_depth = q->nr_requests;

        ib_dev->ibd_bd = bd;

        dev = transport_add_device_to_core_hba(hba,
                        &iblock_template, se_dev, dev_flags, ib_dev,
                        &dev_limits, "IBLOCK", IBLOCK_VERSION);
        if (!dev)
                goto failed;

        /*
         * Check if the underlying struct block_device request_queue supports
         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
         * in ATA and we need to set TPE=1
         */
        if (blk_queue_discard(q)) {
                dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
                dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity =
                                q->limits.discard_granularity;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;

                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }

        if (blk_queue_nonrot(q))
                dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

        return dev;

failed:
        if (ib_dev->ibd_bio_set) {
                bioset_free(ib_dev->ibd_bio_set);
                ib_dev->ibd_bio_set = NULL;
        }
        ib_dev->ibd_bd = NULL;
        return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
        struct iblock_dev *ib_dev = p;

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
}

static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
        return container_of(task, struct iblock_req, ib_task);
}

static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
        struct iblock_req *ib_req;

        ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ib_req) {
                pr_err("Unable to allocate memory for struct iblock_req\n");
                return NULL;
        }

        atomic_set(&ib_req->ib_bio_cnt, 0);
        return &ib_req->ib_task;
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

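/*
 * Worked example (illustrative numbers, not taken from any particular
 * device): a 1 GiB block device with 512-byte logical blocks yields
 * blocks_long = (1073741824 / 512) - 1 = 2097151.  If the exported
 * se_dev_attrib.block_size is 4096, the 512 -> 4096 case shifts right by
 * three: 2097151 >> 3 = 262143, i.e. the last addressable 4096-byte LBA.
 */
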
static void iblock_end_io_flush(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;

        if (err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);

        if (cmd)
                transport_complete_sync_cache(cmd, err == 0);
        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                transport_complete_sync_cache(cmd, 1);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(WRITE_FLUSH, bio);
}

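/*
 * Note: the flush bio above is allocated with zero bio_vecs; submitting it
 * with WRITE_FLUSH asks the block layer for a pure cache-flush request
 * against ib_dev->ibd_bd, which is why no data pages are attached.  When
 * the Immediate bit was set, bi_private is left NULL (see the !immed check)
 * so iblock_end_io_flush() does not complete the command a second time.
 */
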
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        int barrier = 0;

        return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

static void iblock_free_task(struct se_task *task)
{
        kfree(IBLOCK_REQ(task));
}

enum {
        Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
                struct se_subsystem_dev *se_dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
                                        "%s", arg_p);
                        kfree(arg_p);
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

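/*
 * Typical usage from userspace (paths are illustrative; the exact configfs
 * layout depends on how the $HBA/$DEV directories were created):
 *
 *   echo "udev_path=/dev/sdb" > /sys/kernel/config/target/core/$HBA/$DEV/control
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 *
 * The udev_path= token is parsed via match_token() against the tokens[]
 * table above, and must be supplied before the device can be enabled (see
 * iblock_check_configfs_dev_params() below).
 */
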
static ssize_t iblock_check_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

        if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t iblock_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
        struct block_device *bd = ibd->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
                bl += sprintf(b + bl, " UDEV PATH: %s\n",
                                ibd->ibd_udev_path);
        } else
                bl += sprintf(b + bl, "\n");

        bl += sprintf(b + bl, " ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

static void iblock_bio_destructor(struct bio *bio)
{
        struct se_task *task = bio->bi_private;
        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

        bio_free(bio, ib_dev->ibd_bio_set);
}

static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
        struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
        struct iblock_req *ib_req = IBLOCK_REQ(task);
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
                " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
        pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = task;
        bio->bi_destructor = iblock_bio_destructor;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        atomic_inc(&ib_req->ib_bio_cnt);

        pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
        pr_debug("Set ib_req->ib_bio_cnt: %d\n",
                        atomic_read(&ib_req->ib_bio_cnt));
        return bio;
}

static int iblock_do_task(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 i, sg_num = task->task_sg_nents;
        sector_t block_lba;
        struct blk_plug plug;
        int rw;

        if (task->task_data_direction == DMA_TO_DEVICE) {
                /*
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                     task->task_se_cmd->t_tasks_fua))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
        } else {
                rw = READ;
        }

        /*
         * Do starting conversion up from non 512-byte blocksize with
         * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
         */
        if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
                block_lba = (task->task_lba << 3);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
                block_lba = (task->task_lba << 2);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
                block_lba = (task->task_lba << 1);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
                block_lba = task->task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                        " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }

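        /*
         * For example, with a 4096-byte exported block_size a SCSI LBA of
         * 100 becomes block_lba = 100 << 3 = 800, i.e. byte offset
         * 100 * 4096 = 800 * 512 on the underlying block device.
         */
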
        bio = iblock_get_bio(task, block_lba, sg_num);
        if (!bio)
                return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        bio = iblock_get_bio(task, block_lba, sg_num);
                        if (!bio)
                                goto fail;
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
        }

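        /*
         * All bios for this task are queued on the local bio_list first and
         * then submitted inside a blk_plug, so the block layer can batch and
         * merge the requests before dispatching them to the backing device.
         */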
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&list)))
                submit_bio(rw, bio);
        blk_finish_plug(&plug);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;

fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
        return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_task *task = bio->bi_private;
        struct iblock_req *ibr = IBLOCK_REQ(task);

        /*
         * Set -EIO if !BIO_UPTODATE and the passed err is still 0.
         */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                err = -EIO;

        if (err != 0) {
                pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic_inc();
        }

        bio_put(bio);

        if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
                return;

        pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
                task, bio, task->task_lba,
                (unsigned long long)bio->bi_sector, err);

        transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}

static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .write_cache_emulated   = 1,
        .fua_write_emulated     = 1,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .allocate_virtdevice    = iblock_allocate_virtdevice,
        .create_virtdevice      = iblock_create_virtdevice,
        .free_device            = iblock_free_device,
        .alloc_task             = iblock_alloc_task,
        .do_task                = iblock_do_task,
        .do_discard             = iblock_do_discard,
        .do_sync_cache          = iblock_emulate_sync_cache,
        .free_task              = iblock_free_task,
        .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_rev         = iblock_get_device_rev,
        .get_device_type        = iblock_get_device_type,
        .get_blocks             = iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
        return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);