/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/* iblock_attach_hba(): (Part of se_subsystem_api_t template) */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host;

	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
	if (!ib_host) {
		pr_err("Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}

	ib_host->iblock_host_id = host_id;

	hba->hba_ptr = ib_host;

	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
		hba->hba_id, ib_host->iblock_host_id);

	return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	kfree(ib_host);
	hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;
	struct iblock_hba *ib_host = hba->hba_ptr;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_host = ib_host;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}
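/*
 * iblock_create_virtdevice() claims the backing struct block_device named by
 * the configfs-supplied udev_path, copies its request_queue limits into a
 * local struct se_dev_limits, and registers the result with the target core
 * via transport_add_device_to_core_hba().
 */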
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;
	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;
failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
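/*
 * struct iblock_req embeds its struct se_task as ->ib_task, so the backend
 * can recover the containing request from a task pointer with container_of().
 */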
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ib_req) {
		pr_err("Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}
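/*
 * READ CAPACITY must report the last LBA in units of the configured
 * se_dev_attrib.block_size, while i_size_read() counts bytes.  When the
 * configured block size differs from the bdev's logical block size the
 * block count is rescaled by shifting.  Worked example: a 1 TiB bdev with
 * 512-byte logical blocks exported with a 4096-byte configured block size
 * yields blocks_long = 2^31 - 1, then >>= 3, i.e. 268435455 == 2^28 - 1.
 */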
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	/* IMMED bit (byte 1, bit 1) of the SYNCHRONIZE CACHE CDB */
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
	kfree(IBLOCK_REQ(task));
}
enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
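/*
 * udev_path= is set through the device's configfs control attribute before
 * the device is enabled.  Illustrative example (paths are hypothetical):
 *
 *   echo "udev_path=/dev/sdb" > \
 *       /sys/kernel/config/target/core/iblock_0/my_dev/control
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/my_dev/enable
 */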
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		case Opt_err:
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	/* Each in-flight bio for this task is counted in ib_bio_cnt */
	atomic_inc(&ib_req->ib_bio_cnt);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
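/*
 * iblock_do_task() maps the task's scatterlist into one or more bios: pages
 * are added with bio_add_page() until a bio fills up, additional bios are
 * chained through a bio_list, and the whole batch is submitted under a single
 * blk_plug so the block layer can merge adjacent requests.
 */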
static int iblock_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	bio = iblock_get_bio(task, block_lba, sg_num);
	if (!bio) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOMEM;
	}

	bio_list_init(&list);
	bio_list_add(&list, bio);

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(task, block_lba, sg_num);
			if (!bio)
				goto fail;
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);

	return 0;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
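/*
 * Completion handler for the data path: every bio submitted for a task was
 * counted in ib_bio_cnt by iblock_get_bio(), so the task is completed only
 * when the final bio finishes, and any I/O error recorded in ib_bio_err_cnt
 * fails the whole task.
 */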
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still zero.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;

	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		 task, bio, task->task_lba,
		 (unsigned long long)bio->bi_sector, err);

	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}
static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);