/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_file.h"
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}
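
/*
 * Illustrative note, not from the original source: struct fd_dev embeds its
 * struct se_device as the member named 'dev', so container_of() can walk
 * back from the se_device pointer the core passes in to the surrounding
 * backend-private structure, e.g.:
 *
 *	struct fd_dev *fd_dev = FD_DEV(dev);	/+ dev: struct se_device * +/
 *
 * This is the usual kernel pattern for recovering backend-private state.
 */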
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return &fd_dev->dev;
}
static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write-out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
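		/*
		 * Worked example (illustrative, not in the original source):
		 * a 100 GiB block device (107374182400 bytes) with 512-byte
		 * logical blocks yields dev_size = 107374182400 - 512 =
		 * 107374181888, which the pr_debug above reports as
		 * 209715199 blocks.
		 */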
		/*
		 * Check if the underlying struct block_device request_queue supports
		 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
		 * in ATA and we need to set TPE=1
		 */
		if (blk_queue_discard(q)) {
			dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
			/*
			 * Currently hardcoded to 1 in Linux/SCSI code..
			 */
			dev->dev_attrib.max_unmap_block_desc_count = 1;
			dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
			dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;
			pr_debug("IFILE: BLOCK Discard support available,"
					" disabled by default\n");
		}
		/*
		 * Enable write same emulation for IBLOCK and use 0xFFFF as
		 * the smaller WRITE_SAME(10) only has a two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (blk_queue_nonrot(q))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
		 * based upon struct iovec limit for vfs_writev()
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}
static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}
static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
			 int is_write)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *prot_fd = dev->fd_prot_file;
	loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
	unsigned char *buf;
	u32 prot_size;
	int rc, ret = 1;

	prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
		     se_dev->prot_length;

	if (!is_write) {
		fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
		if (!fd_prot->prot_buf) {
			pr_err("Unable to allocate fd_prot->prot_buf\n");
			return -ENOMEM;
		}
		buf = fd_prot->prot_buf;

		fd_prot->prot_sg_nents = 1;
		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
					   GFP_KERNEL);
		if (!fd_prot->prot_sg) {
			pr_err("Unable to allocate fd_prot->prot_sg\n");
			kfree(fd_prot->prot_buf);
			return -ENOMEM;
		}
		sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
		sg_set_buf(fd_prot->prot_sg, buf, prot_size);
	}

	if (is_write) {
		rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
		if (rc < 0 || prot_size != rc) {
			pr_err("kernel_write() for fd_do_prot_rw failed:"
			       " %d\n", rc);
			ret = -EINVAL;
		}
	} else {
		rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
		if (rc < 0) {
			pr_err("kernel_read() for fd_do_prot_rw failed:"
			       " %d\n", rc);
			ret = -EINVAL;
		}
	}

	if (is_write || ret < 0) {
		kfree(fd_prot->prot_sg);
		kfree(fd_prot->prot_buf);
	}

	return ret;
}
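
/*
 * Worked example for the prot_size computation above (illustrative): with
 * T10 PI, se_dev->prot_length is 8 bytes of protection information per
 * logical block, so a 65536-byte I/O on a 512-byte block_size device spans
 * 128 blocks and moves 128 * 8 = 1024 bytes through the .protection file.
 */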
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
		    u32 sgl_nents, int is_write)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos);
	else
		ret = vfs_iter_read(fd, &iter, &pos);

	kfree(bvec);

	if (is_write) {
		if (ret < 0 || ret != cmd->data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != cmd->data_length) {
				pr_err("%s() returned %d, expecting %u for "
				       "S_ISBLK\n", __func__, ret,
				       cmd->data_length);
				return (ret < 0 ? ret : -EINVAL);
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
				       __func__, ret);
				return ret;
			}
		}
	}
	return 1;
}
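
/*
 * Note on fd_do_rw() (illustrative): no bounce buffer is involved; each
 * bio_vec points straight at a scatterlist page, so e.g. a 3-entry SGL of
 * 4096-byte segments becomes a 3-entry bvec array behind a single
 * 12288-byte iov_iter that one vfs_iter_read()/vfs_iter_write() call
 * consumes.
 */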
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}
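
/*
 * Range example for fd_execute_sync_cache() (illustrative): LBA 16 with
 * data_length 4096 on a 512-byte block_size device syncs bytes
 * [16 * 512, 16 * 512 + 4096 - 1] = [8192, 12287], while LBA 0 with
 * data_length 0 flushes the whole backing file via [0, LLONG_MAX].
 */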
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
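
/*
 * Note on the WRITE_SAME emulation above (illustrative): every bio_vec
 * entry points at the same single-block page from t_data_sg[0], so e.g.
 * nolb = 1024 with a 512-byte block_size issues one 524288-byte
 * vfs_iter_write() that repeats that block 1024 times.
 */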
static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);

		if (ret != len) {
			pr_err("vfs_write to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}
static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}
	memset(buf, 0xff, PAGE_SIZE);

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}
static sense_reason_t
fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
{
	struct file *file = priv;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is block device, use discard */
		struct block_device *bdev = inode->i_bdev;

		ret = blkdev_issue_discard(bdev, lba,
				nolb, GFP_KERNEL, 0);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is normal file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}
static sense_reason_t
fd_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *file = fd_dev->fd_file;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	ret = fd_do_unmap(cmd, file, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}
static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;

	return sbc_execute_unmap(cmd, fd_do_unmap, file);
}
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_prot fd_prot;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		memset(&fd_prot, 0, sizeof(struct fd_prot));

		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_prot_rw(cmd, &fd_prot, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;

			rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
						 0, fd_prot.prot_sg, 0);
			if (rc) {
				kfree(fd_prot.prot_sg);
				kfree(fd_prot.prot_buf);
				return rc;
			}
			kfree(fd_prot.prot_sg);
			kfree(fd_prot.prot_buf);
		}
	} else {
		memset(&fd_prot, 0, sizeof(struct fd_prot));

		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			u32 sectors = cmd->data_length / dev->dev_attrib.block_size;

			ret = fd_do_prot_rw(cmd, &fd_prot, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

			rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
						  0, fd_prot.prot_sg, 0);
			if (rc) {
				kfree(fd_prot.prot_sg);
				kfree(fd_prot.prot_buf);
				return rc;
			}
		}

		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = FD_DEV(dev);
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_prot_rw(cmd, &fd_prot, true);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0) {
		kfree(fd_prot.prot_sg);
		kfree(fd_prot.prot_buf);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
					  FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
				 fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
				       " fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
				 " bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
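
/*
 * Worked example (illustrative): a flat file with fd_dev_size = 1073741824
 * (1 GiB) and a 512-byte block_size makes fd_get_blocks() return
 * (1073741824 - 512) / 512 = 2097151, i.e. the highest addressable LBA of
 * a 2097152-block device.
 */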
static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}
static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	memset(buf, 0xff, unit_size);
	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}
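
/*
 * Worked example (illustrative): with 8 bytes of PI per block and a device
 * whose get_blocks() returns 2097151 (the last LBA), fd_format_prot()
 * initializes (2097151 + 1) * 8 = 16777216 bytes of 0xff in the .protection
 * file, written unit_size bytes at a time by fd_do_prot_fill().
 */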
static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}
static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_write_same_unmap = fd_execute_write_same_unmap,
	.execute_unmap		= fd_execute_unmap,
};
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(fileio);
static struct configfs_attribute *fileio_backend_dev_attrs[] = {
	&fileio_dev_attrib_emulate_model_alias.attr,
	&fileio_dev_attrib_emulate_dpo.attr,
	&fileio_dev_attrib_emulate_fua_write.attr,
	&fileio_dev_attrib_emulate_fua_read.attr,
	&fileio_dev_attrib_emulate_write_cache.attr,
	&fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&fileio_dev_attrib_emulate_tas.attr,
	&fileio_dev_attrib_emulate_tpu.attr,
	&fileio_dev_attrib_emulate_tpws.attr,
	&fileio_dev_attrib_emulate_caw.attr,
	&fileio_dev_attrib_emulate_3pc.attr,
	&fileio_dev_attrib_pi_prot_type.attr,
	&fileio_dev_attrib_hw_pi_prot_type.attr,
	&fileio_dev_attrib_pi_prot_format.attr,
	&fileio_dev_attrib_enforce_pr_isids.attr,
	&fileio_dev_attrib_is_nonrot.attr,
	&fileio_dev_attrib_emulate_rest_reord.attr,
	&fileio_dev_attrib_force_pr_aptpl.attr,
	&fileio_dev_attrib_hw_block_size.attr,
	&fileio_dev_attrib_block_size.attr,
	&fileio_dev_attrib_hw_max_sectors.attr,
	&fileio_dev_attrib_optimal_sectors.attr,
	&fileio_dev_attrib_hw_queue_depth.attr,
	&fileio_dev_attrib_queue_depth.attr,
	&fileio_dev_attrib_max_unmap_lba_count.attr,
	&fileio_dev_attrib_max_unmap_block_desc_count.attr,
	&fileio_dev_attrib_unmap_granularity.attr,
	&fileio_dev_attrib_unmap_granularity_alignment.attr,
	&fileio_dev_attrib_max_write_same_len.attr,
	NULL,
};
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
};
static int __init fileio_module_init(void)
{
	struct target_backend_cits *tbc = &fileio_template.tb_cits;

	target_core_setup_sub_cits(&fileio_template);
	tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;

	return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);