/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

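/*
 * Note: struct rd_dev embeds its struct se_device directly, so RD_DEV()
 * above is a plain container_of() accessor; no per-call allocation or
 * lookup is involved.
 */
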
/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

static u32 rd_release_sgl_table(struct rd_dev *rd_dev,
				struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

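/*
 * Backing store layout: rd_dev->sg_table_array is a flat array of
 * rd_dev_sg_table entries, each holding up to max_sg_per_table single-page
 * scatterlist entries and recording the inclusive
 * [page_start_offset, page_end_offset] range of ramdisk pages it covers.
 */
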
/*	rd_build_device_space():
 *
 *
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev,
				 struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

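/*
 * Rough sizing sketch (illustrative numbers, not taken from this file):
 * with 4 KiB pages, RD_MAX_ALLOCATION_SIZE of 65536, and assuming
 * sizeof(struct scatterlist) == 32 on a typical 64-bit build,
 * max_sg_per_table is 2048, so each rd_dev_sg_table covers up to 8 MiB of
 * ramdisk space and rd_pages=65536 (256 MiB) is carved into 33 tables.
 */
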
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length=8byte dif data
	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
	 *		   (prot_length/block_size) + pad
	 * PGSZ canceled each other.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

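/*
 * Worked example of the sizing above (illustrative numbers): for
 * rd_pages=65536, block_size=512 and prot_length=8 (8 bytes of DIF per
 * 512-byte sector), total_sg_needed = 65536 * 8 / 512 + 1 = 1025 pages,
 * i.e. the protection store is block_size/prot_length (64x) smaller than
 * the data store.
 */
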
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

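/*
 * I/O path below: the command's starting LBA is converted to a byte offset
 * (t_task_lba * block_size), whose quotient and remainder against PAGE_SIZE
 * via do_div() select the backing page index and the offset within that
 * page; data is then memcpy()'d page by page between the fabric scatterlist
 * and the ramdisk scatterlist.
 */
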
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page -
						prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);

	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.length = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page -
						prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n",
			rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

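/*
 * rd_get_blocks() reports the last addressable LBA (total blocks - 1),
 * which is the value the generic SBC code expects when emulating
 * READ CAPACITY.
 */
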
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

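/*
 * DEF_TB_DEFAULT_ATTRIBS() (from target_core_backend_configfs.h) expands to
 * the standard set of rd_mcp_dev_attrib_* configfs attribute definitions
 * referenced in the array below.
 */
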
static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_fabric_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};

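/*
 * Module init/exit: target_core_setup_sub_cits() populates the template's
 * tb_cits config_item_types with the generic defaults, after which the
 * dev_attrib cit is pointed at the rd_mcp-specific attribute array above
 * before the backend is registered with the core.
 */
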
int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}