/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"
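
/*
 * Note: the rd_mcp ("memcpy ramdisk") backend keeps all device data in
 * arrays of scatterlist tables backed by individually allocated kernel
 * pages, and services I/O by memcpy'ing between those pages and the
 * command's SGL.
 */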

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}
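
/*
 * RD_DEV() works because struct rd_dev embeds its struct se_device as the
 * member named 'dev' (see target_core_rd.h), so container_of() can map the
 * generic device back to the ramdisk-private structure.
 */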

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
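
/*
 * Backing store layout: the device's pages are grouped into struct
 * rd_dev_sg_table chunks, each covering a contiguous [page_start_offset,
 * page_end_offset] range, so both the teardown below and the lookup in
 * rd_get_sg_table() can walk or index the array directly.
 */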

/*	rd_release_device_space():
 *
 *
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
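
/*
 * Each table holds at most RD_MAX_ALLOCATION_SIZE / sizeof(struct
 * scatterlist) entries, one page per entry; rd_get_sg_table() relies on
 * this fixed fan-out when computing a table index from a page number.
 */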

/*	rd_build_device_space():
 *
 *
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
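
/*
 * Device setup is split across the se_subsystem_api lifecycle:
 * alloc_device() only creates the rd_dev object, configfs parameters
 * (rd_pages=, rd_nullio=) arrive via set_configfs_dev_params(), and
 * configure_device() then builds the backing page tables once the page
 * count is known.
 */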
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}
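
/*
 * Table lookup: page / sg_per_table picks the candidate table, and the
 * table's start/end page offsets are re-checked before returning it.
 */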
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
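
/*
 * rd_execute_rw() converts the command's LBA into a (page, offset) pair
 * within the ramdisk, then iterates the caller's SGL with sg_miter,
 * copying min(miter segment, remaining bytes in the current rd page) per
 * pass and advancing rd_sg/table as page boundaries are crossed.
 */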
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.length = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
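
/*
 * configfs parameters: rd_pages= (required) sets the device size in pages;
 * rd_nullio=1 makes rd_execute_rw() complete commands immediately without
 * allocating or touching any backing pages.
 */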
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
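
/*
 * Example usage (a hypothetical shell session, assuming the standard
 * target configfs layout; exact paths may differ):
 *
 *   mkdir /sys/kernel/config/target/core/rd_mcp_0/ramdisk0
 *   echo "rd_pages=32768,rd_nullio=1" > \
 *           /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 *   echo 1 > /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/enable
 */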
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}
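
/*
 * get_blocks() reports the highest addressable LBA rather than the block
 * count, hence the trailing "- 1" below.
 */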
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};
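
/*
 * CDB parsing is delegated to the generic SBC library; sbc_parse_cdb()
 * sets up command execution so that reads and writes come back through
 * rd_execute_rw() above.
 */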
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
};

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}