/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}
/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 *
 *
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

#ifdef CONFIG_ARCH_HAS_SG_CHAIN

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

#endif /* CONFIG_ARCH_HAS_SG_CHAIN */

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length=8byte dif data
	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
	 *		   (prot_length/block_size) + pad
	 * PGSZ canceled each other.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}
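/*
 * Illustrative sizing only (hypothetical values, not taken from this file):
 * with rd_page_count=65536, block_size=512 and prot_length=8, the formula
 * above gives total_sg_needed = 65536 * 8 / 512 + 1 = 1025 protection
 * pages, i.e. roughly 1/64th of the data area, reflecting the PGSZ
 * cancellation noted in the comment inside rd_build_prot_space().
 */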
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}
static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
				     unsigned int, struct scatterlist *, int);
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporarily contiguous scatterlist entries if prot pages
	 * straddles multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
							       prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
	if (need_to_release)
		kfree(prot_sg);

	return rc;
}
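/*
 * Illustrative case for the !CONFIG_ARCH_HAS_SG_CHAIN path above
 * (hypothetical numbers): with 1024 protection entries per table, a verify
 * touching prot pages 1022-1025 straddles tables 0 and 1, so the relevant
 * entries are copied into a temporary contiguous prot_sg array before
 * dif_verify() runs, and the array is freed afterwards.
 */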
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);

	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
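/*
 * Example control string accepted by rd_set_configfs_dev_params()
 * (illustrative only; the exact configfs path depends on the local setup
 * and the HBA/device names chosen by the administrator):
 *
 *   echo "rd_pages=32768,rd_nullio=1" > \
 *       /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 */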
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}
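/*
 * Worked example (hypothetical values): rd_page_count=4096 pages of
 * PAGE_SIZE=4096 bytes exported with a 512-byte block_size gives
 * 4096 * 4096 / 512 = 32768 blocks, so rd_get_blocks() returns 32767,
 * the highest addressable LBA.
 */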
static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}
static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}
static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};
static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};
int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}