/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"
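/*
 * struct rd_dev embeds its struct se_device, so given the se_device
 * pointer the core hands us, container_of() recovers the backend
 * private state without any extra lookup or allocation.
 */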
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
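/*
 * Free every backing page referenced by the given scatterlist tables,
 * then the scatterlist arrays and the table array itself.  Returns the
 * number of pages actually freed so callers can log the totals.
 */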
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 *
 *
 */
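/*
 * The backing store is built as an array of rd_dev_sg_table entries,
 * each holding up to max_sg_per_table scatterlist entries with one
 * freshly allocated, init_payload-filled page behind each entry.
 * Tables are linked with sg_chain() through the reserved extra
 * element, so the whole device can also be walked as one chained
 * scatterlist.  As a rough illustration (the real numbers depend on
 * RD_MAX_ALLOCATION_SIZE and sizeof(struct scatterlist) for the
 * build): a 64K allocation limit and 32-byte scatterlist entries give
 * 2048 entries per table, i.e. 8MB of ramdisk per table at 4K pages.
 * On allocation failure this returns -ENOMEM and relies on the caller
 * to unwind partially built tables (rd_configure_device() does so via
 * rd_release_device_space()).
 */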
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table + chain_entry);

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}
static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the per-block DIF tuple size (8 bytes for T10 DIF).
	 * Pages of protection data needed:
	 *   rd_page_count * (PAGE_SIZE / block_size) blocks
	 *   * prot_length bytes per block / PAGE_SIZE
	 * The PAGE_SIZE factors cancel, leaving
	 * rd_page_count * prot_length / block_size, plus one page of pad.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}
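/*
 * Configure-time setup: requires that rd_pages= has been supplied via
 * configfs, builds the page-backed scatterlist space, and advertises
 * the hardware attributes (block size, max sectors, queue depth) that
 * the core uses for this device.
 */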
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev->dev_attrib.is_nonrot = 1;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		 " %u pages in %u tables, %lu total bytes\n",
		 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count,
		 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}
static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}
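/*
 * Look up the rd_dev_sg_table covering a given ramdisk page.  Because
 * every table except possibly the last holds exactly sg_per_table
 * pages, the table index is simply page / sg_per_table; the
 * start/end offset check guards against an out-of-range page.
 */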
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}
static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}
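/*
 * DIF protection path: translate the command's starting LBA into a
 * (page, offset) pair inside the protection scatterlists, verify the
 * protection tuples with sbc_dif_verify(), and on success copy them
 * between cmd->t_prot_sg and the backing store with
 * sbc_dif_copy_prot().  Reads verify against the ramdisk copy; writes
 * verify against the initiator-supplied buffer.  need_to_release is
 * never set and prot_npages is unused here; both look vestigial.
 */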
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

	if (is_read)
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    prot_sg, prot_offset);
	else
		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
				    cmd->t_prot_sg, 0);

	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	if (need_to_release)
		kfree(prot_sg);

	return rc;
}
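/*
 * Main I/O path: a straight memcpy between the command's scatterlist
 * and the ramdisk's page-backed scatterlists, driven by an
 * sg_mapping_iter on the command side.  The starting page and
 * intra-page offset come from LBA * block_size split by PAGE_SIZE;
 * crossing a table boundary re-resolves the table with
 * rd_get_sg_table().
 */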
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		 dev->rd_dev_id,
		 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		 cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
		       data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
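/*
 * configfs device parameters.  A minimal usage sketch (the exact paths
 * under the target configfs layout are an assumption for illustration):
 *
 *   mkdir -p /sys/kernel/config/target/core/rd_mcp_0/ramdisk0
 *   echo "rd_pages=65536,rd_nullio=1" > \
 *       /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 *
 * rd_pages= is mandatory (see rd_configure_device()); rd_nullio=1
 * completes I/O without touching any backing pages.
 */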
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
					  const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d\n",
			rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}
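/*
 * Capacity reported to the core: total bytes divided by the logical
 * block size, minus one, because READ CAPACITY reports the LBA of the
 * last addressable block rather than the block count.
 */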
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}
static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}
static struct sbc_ops rd_sbc_ops = {
	.execute_rw = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
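/*
 * Backend ops vector wiring this driver into the generic target core;
 * registered under the name "rd_mcp" at module init below.
 */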
static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};
int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}