// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <linux/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"
#include "target_core_pr.h"

#define IBLOCK_MAX_BIO_PER_TASK	32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

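/*
 * Allocate the iblock_dev backend private data along with the per-cpu
 * plug array used by iblock_plug_device(); the backing block device
 * itself is not opened until iblock_configure_device().
 */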
static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

static bool iblock_configure_unmap(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	return target_configure_unmap_from_queue(&dev->dev_attrib,
						 ib_dev->ibd_bd);
}

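/*
 * Open the block device named by the udev_path= configfs parameter and
 * copy its queue limits (logical block size, max sectors, queue depth,
 * write zeroes support and integrity profile) into the se_device
 * attributes.
 */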
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct file *bdev_file;
	struct block_device *bd;
	struct blk_integrity *bi;
	blk_mode_t mode = BLK_OPEN_READ;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	if (!ib_dev->ibd_readonly)
		mode |= BLK_OPEN_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
					   NULL);
	if (IS_ERR(bdev_file)) {
		ret = PTR_ERR(bdev_file);
		goto out_free_bioset;
	}
	ib_dev->ibd_bdev_file = bdev_file;
	ib_dev->ibd_bd = bd = file_bdev(bdev_file);

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
			SECTOR_SIZE,
			dev->dev_attrib.hw_block_size);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the default, since the smaller WRITE_SAME(10) only has a
	 * two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (bdev_nonrot(bd))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (!bi)
		return 0;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		pr_err("IBLOCK export of blk_integrity: %s not supported\n",
		       blk_integrity_profile_name(bi));
		ret = -ENOSYS;
		goto out_blkdev_put;
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		else
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		break;
	default:
		break;
	}

	if (dev->dev_attrib.pi_prot_type) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
			pr_err("Unable to allocate bioset for PI\n");
			ret = -ENOMEM;
			goto out_blkdev_put;
		}
		pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
			 &bs->bio_integrity_pool);
	}

	dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	return 0;

out_blkdev_put:
	fput(ib_dev->ibd_bdev_file);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bdev_file)
		fput(ib_dev->ibd_bdev_file);
	bioset_exit(&ib_dev->ibd_bio_set);
}

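/*
 * Start a block-layer plug on the submitting CPU so bios queued by
 * subsequent commands can be batched; iblock_unplug_device() finishes
 * the plug and clears the per-cpu IBD_PLUGF_PLUGGED flag.
 */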
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per-cpu plug this can be run from. We
	 * shouldn't have multiple threads on the same cpu calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

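/*
 * Report the last addressable LBA of the backing device in units of the
 * exported dev_attrib.block_size, rescaling when the backing device uses
 * a different logical block size than the one exported to the fabric.
 */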
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

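/*
 * Drop one reference for a completed bio (or for the submission path) and,
 * once the last reference is gone, translate the aggregated bio status into
 * a SAM status and complete the se_cmd.
 */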
static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (blk_status == BLK_STS_RESV_CONFLICT)
		status = SAM_STAT_RESERVATION_CONFLICT;
	else if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;
	blk_status_t blk_status = bio->bi_status;

	if (bio->bi_status) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd, blk_status);
}

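/*
 * Allocate a bio from the per-device bioset, aimed at the claimed block
 * device, starting at the given LBA and wired up to iblock_bio_done().
 */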
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  blk_opf_t opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
			       GFP_NOIO, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
	bio->bi_end_io = iblock_end_io_flush;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

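/*
 * Translate an UNMAP request into a discard on the backing block device.
 */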
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

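/*
 * Offload a WRITE_SAME of an all-zero payload to the device via
 * blkdev_issue_zeroout(); callers fall back to the bio-based path when
 * the payload is not zero filled.
 */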
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

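/*
 * Emulate WRITE_SAME by either offloading an all-zero payload through
 * iblock_execute_zero_out() or by repeatedly adding the single-block
 * payload page to bios covering the requested LBA range.
 */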
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (!cmd->t_data_nents)
		return TCM_INVALID_CDB_FIELD;

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

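/*
 * Parse the configfs control string: udev_path= selects the backing block
 * device (only before it has been claimed), readonly= toggles read-only
 * mode, and force= is accepted but currently ignored.
 */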
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

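/*
 * Attach a bio_integrity_payload to the bio and populate it with protection
 * information pages from the command's t_prot_sg scatterlist, advancing the
 * sg_mapping_iter as bios are filled.
 */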
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bio_integrity_bytes(bi, bio_sectors(bio));
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;

		sg_miter_stop(miter);
	}

	return 0;
}

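/*
 * Map the command's data scatterlist onto one or more bios and submit them,
 * batching submissions every IBLOCK_MAX_BIO_PER_TASK bios. Writes are marked
 * REQ_SYNC/REQ_IDLE and are upgraded to REQ_FUA when the device supports FUA
 * and either the initiator set the FUA bit or no volatile write cache is
 * enabled.
 */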
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	blk_opf_t opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

		/*
		 * Set bits to indicate WRITE_ODIRECT so we are not throttled
		 * by WBT.
		 */
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		miter_dir = SG_MITER_TO_SG;
		if (bdev_fua(ib_dev->ibd_bd)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!bdev_write_cache(ib_dev->ibd_bd))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd, BLK_STS_OK);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd, BLK_STS_OK);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

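/*
 * Pass a PERSISTENT RESERVE OUT service action straight through to the
 * backing device's block layer pr_ops, mapping the result back to a SCSI
 * sense/status for the target core.
 */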
static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
					    u64 sa_key, u8 type, bool aptpl)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int ret;

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	switch (sa) {
	case PRO_REGISTER:
	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
		if (!ops->pr_register) {
			pr_err("block device does not support pr_register.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		/* The block layer pr ops always enables aptpl */
		if (!aptpl)
			pr_info("APTPL not set by initiator, but will be used.\n");

		ret = ops->pr_register(bdev, key, sa_key,
				sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
		break;
	case PRO_RESERVE:
		if (!ops->pr_reserve) {
			pr_err("block_device does not support pr_reserve.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
		break;
	case PRO_CLEAR:
		if (!ops->pr_clear) {
			pr_err("block_device does not support pr_clear.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_clear(bdev, key);
		break;
	case PRO_PREEMPT:
	case PRO_PREEMPT_AND_ABORT:
		if (!ops->pr_preempt) {
			pr_err("block_device does not support pr_preempt.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_preempt(bdev, key, sa_key,
				      scsi_pr_type_to_block(type),
				      sa == PRO_PREEMPT_AND_ABORT);
		break;
	case PRO_RELEASE:
		if (!ops->pr_release) {
			pr_err("block_device does not support pr_release.\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}

		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
		break;
	default:
		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ret)
		return TCM_NO_SENSE;
	else if (ret == PR_STS_RESERVATION_CONFLICT)
		return TCM_RESERVATION_CONFLICT;
	else
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static void iblock_pr_report_caps(unsigned char *param_data)
{
	u16 len = 8;

	put_unaligned_be16(len, &param_data[0]);
	/*
	 * When using the pr_ops passthrough method we only support exporting
	 * the device through one target port because from the backend module
	 * level we can't see the target port config. As a result we only
	 * support registration directly from the I_T nexus the cmd is sent
	 * through and do not set ATP_C here.
	 *
	 * The block layer pr_ops do not support passing in initiators so
	 * we don't set SIP_C here.
	 */
	/* PTPL_C: Persistence across Target Power Loss bit */
	param_data[2] |= 0x01;
	/*
	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
	 * set the TMV: Task Mask Valid bit.
	 */
	param_data[3] |= 0x80;
	/*
	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
	 */
	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
	/*
	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
	 * layer pr ops always enables this so report it active.
	 */
	param_data[3] |= 0x01;
	/*
	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
	 */
	param_data[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
}

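/*
 * Build READ KEYS parameter data from the block layer's pr_read_keys(),
 * retrying with a larger buffer if more keys are registered than the
 * initial guess of 16 paths.
 */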
static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
		unsigned char *param_data)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int i, len, paths, data_offset;
	struct pr_keys *keys;
	sense_reason_t ret;

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ops->pr_read_keys) {
		pr_err("Block device does not support read_keys.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * We don't know what's under us, but dm-multipath will register every
	 * path with the same key, so start off with enough space for 16 paths,
	 * which is not a lot of memory and should normally be enough.
	 */
	paths = 16;
retry:
	len = 8 * paths;
	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
	if (!keys)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	keys->num_keys = paths;
	if (!ops->pr_read_keys(bdev, keys)) {
		if (keys->num_keys > paths) {
			kfree(keys);
			paths *= 2;
			goto retry;
		}
	} else {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto free_keys;
	}

	ret = TCM_NO_SENSE;

	put_unaligned_be32(keys->generation, &param_data[0]);
	if (!keys->num_keys) {
		put_unaligned_be32(0, &param_data[4]);
		goto free_keys;
	}

	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);

	data_offset = 8;
	for (i = 0; i < keys->num_keys; i++) {
		if (data_offset + 8 > cmd->data_length)
			break;

		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
		data_offset += 8;
	}

free_keys:
	kfree(keys);
	return ret;
}

static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
		unsigned char *param_data)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_held_reservation rsv = { };

	if (!ops) {
		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (!ops->pr_read_reservation) {
		pr_err("Block device does not support pr_read_reservation.\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (ops->pr_read_reservation(bdev, &rsv))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	put_unaligned_be32(rsv.generation, &param_data[0]);
	if (!block_pr_type_to_scsi(rsv.type)) {
		put_unaligned_be32(0, &param_data[4]);
		return TCM_NO_SENSE;
	}

	put_unaligned_be32(16, &param_data[4]);

	if (cmd->data_length < 16)
		return TCM_NO_SENSE;
	put_unaligned_be64(rsv.key, &param_data[8]);

	if (cmd->data_length < 22)
		return TCM_NO_SENSE;
	param_data[21] = block_pr_type_to_scsi(rsv.type);

	return TCM_NO_SENSE;
}

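/*
 * PERSISTENT RESERVE IN dispatch for the pr_ops passthrough backend.
 */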
static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
		unsigned char *param_data)
{
	sense_reason_t ret = TCM_NO_SENSE;

	switch (sa) {
	case PRI_REPORT_CAPABILITIES:
		iblock_pr_report_caps(param_data);
		break;
	case PRI_READ_KEYS:
		ret = iblock_pr_read_keys(cmd, param_data);
		break;
	case PRI_READ_RESERVATION:
		ret = iblock_pr_read_reservation(cmd, param_data);
		break;
	default:
		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return ret;
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct exec_cmd_ops iblock_exec_cmd_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
	.execute_pr_out		= iblock_execute_pr_out,
	.execute_pr_in		= iblock_execute_pr_in,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.configure_unmap	= iblock_configure_unmap,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);