// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/printk.h>
#include <linux/seq_buf.h>
#include <linux/papr_scm.h>
#include <uapi/linux/papr_pdsm.h>

#include "../watermark.h"
#include "nfit_test.h"
	NDTEST_MAX_MAPPING = 6,
#define NDTEST_SCM_DIMM_CMD_MASK	   \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12)	\
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
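
/*
 * The handle produced by NFIT_DIMM_HANDLE() packs the DIMM topology as
 * node[27:16] | socket[15:12] | imc[11:8] | chan[7:4] | dimm[3:0],
 * e.g. NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) == 0x101.
 */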
static DEFINE_SPINLOCK(ndtest_lock);
static struct ndtest_priv *instances[NUM_INSTANCES];

static const struct class ndtest_dimm_class = {
	.name = "nfit_test_dimm",
};

static struct gen_pool *ndtest_pool;
static struct ndtest_dimm dimm_group1[] = {
	{
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
		.uuid_str = "1e5c75d2-b618-11ea-9aa3-507b9ddc0f72",
	},
	{
		.handle = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
		.uuid_str = "1c4d43ac-b618-11ea-be80-507b9ddc0f72",
	},
	{
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
		.uuid_str = "a9f17ffc-b618-11ea-b36d-507b9ddc0f72",
	},
	{
		.handle = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
		.uuid_str = "b6b83b22-b618-11ea-8aae-507b9ddc0f72",
	},
	{
		.handle = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
		.uuid_str = "bf9baaee-b618-11ea-b181-507b9ddc0f72",
	},
};
static struct ndtest_dimm dimm_group2[] = {
	{
		.handle = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
		.uuid_str = "ca0817e2-b618-11ea-9db3-507b9ddc0f72",
		.flags = PAPR_PMEM_UNARMED | PAPR_PMEM_EMPTY |
			 PAPR_PMEM_SAVE_FAILED | PAPR_PMEM_SHUTDOWN_DIRTY |
			 PAPR_PMEM_HEALTH_FATAL,
	},
};
static struct ndtest_mapping region0_mapping[] = {
static struct ndtest_mapping region1_mapping[] = {
static struct ndtest_region bus0_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region0_mapping),
		.mapping = region0_mapping,
	},
	{
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(region1_mapping),
		.mapping = region1_mapping,
		.size = DIMM_SIZE * 2,
	},
};
static struct ndtest_mapping region6_mapping[] = {
static struct ndtest_region bus1_regions[] = {
	{
		.type = ND_DEVICE_NAMESPACE_IO,
		.num_mappings = ARRAY_SIZE(region6_mapping),
		.mapping = region6_mapping,
	},
};
static struct ndtest_config bus_configs[NUM_INSTANCES] = {
	{
		.dimm_count = ARRAY_SIZE(dimm_group1),
		.dimms = dimm_group1,
		.regions = bus0_regions,
		.num_regions = ARRAY_SIZE(bus0_regions),
	},
	{
		.dimm_start = ARRAY_SIZE(dimm_group1),
		.dimm_count = ARRAY_SIZE(dimm_group2),
		.dimms = dimm_group2,
		.regions = bus1_regions,
		.num_regions = ARRAY_SIZE(bus1_regions),
	},
};
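
/*
 * Summary of the tables above: bus instance 0 exposes the dimm_group1 DIMMs
 * through the two PMEM regions in bus0_regions, while instance 1 exposes the
 * single dimm_group2 DIMM (pre-loaded with PAPR health/error flags) through
 * the NAMESPACE_IO region in bus1_regions.  ndtest_bus_register() selects the
 * entry matching the platform device id.
 */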
static inline struct ndtest_priv *to_ndtest_priv(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct ndtest_priv, pdev);
}
static int ndtest_config_get(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(hdr->out_buf, p->label_area + hdr->in_offset, len);

	return buf_len - len;
}
static int ndtest_config_set(struct ndtest_dimm *p, unsigned int buf_len,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned int len;

	if ((hdr->in_offset + hdr->in_length) > LABEL_SIZE)
		return -EINVAL;

	len = min(hdr->in_length, LABEL_SIZE - hdr->in_offset);
	memcpy(p->label_area + hdr->in_offset, hdr->in_buf, len);

	return buf_len - len;
}
static int ndtest_get_config_size(struct ndtest_dimm *dimm, unsigned int buf_len,
				  struct nd_cmd_get_config_size *size)
{
	size->config_size = dimm->config_size;

	return 0;
}
static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
		      struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		      unsigned int buf_len, int *cmd_rc)
{
	struct ndtest_dimm *dimm;

	dimm = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		*cmd_rc = ndtest_get_config_size(dimm, buf_len, buf);
		break;
	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = ndtest_config_get(dimm, buf_len, buf);
		break;
	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = ndtest_config_set(dimm, buf_len, buf);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Failures for a DIMM can be injected using fail_cmd and
	 * fail_cmd_code, see the device attributes below
	 */
	if ((1 << cmd) & dimm->fail_cmd)
		return dimm->fail_cmd_code ? dimm->fail_cmd_code : -EIO;

	return 0;
}
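
/*
 * Example of the fail_cmd/fail_cmd_code injection handled above (paths are
 * illustrative; the class is "nfit_test_dimm" and the device name comes from
 * ndtest_dimm_register() below).  To make ND_CMD_GET_CONFIG_DATA (bit 5) on
 * a DIMM return -ENXIO:
 *
 *   echo 0x20 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd
 *   echo -6 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd_code
 *
 * Writing 0 to fail_cmd restores normal behaviour.
 */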
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct ndtest_priv *t = instances[i];

		if (!t)
			continue;

		spin_lock(&ndtest_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						     + resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
				   && (addr < (unsigned long) n->buf
				       + resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&ndtest_lock);

		if (nfit_res)
			return nfit_res;
	}

	pr_warn("Failed to get resource\n");

	return NULL;
}
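
/*
 * Note that the lookup above matches an address against either the fake
 * physical range of a resource or the kernel virtual address of its backing
 * buffer, so both kinds of address resolve through the same nfit_test hook.
 */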
static void ndtest_release_resource(void *data)
{
	struct nfit_test_resource *res = data;

	spin_lock(&ndtest_lock);
	list_del(&res->list);
	spin_unlock(&ndtest_lock);

	if (resource_size(&res->res) >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, res->res.start,
			      resource_size(&res->res));

	vfree(res->buf);
	kfree(res);
}
static void *ndtest_alloc_resource(struct ndtest_priv *p, size_t size,
				   dma_addr_t *dma)
{
	dma_addr_t __dma;
	void *buf = vmalloc(size);
	struct nfit_test_resource *res;
	struct genpool_data_align data = {
		.align = SZ_128M,
	};

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	if (size >= DIMM_SIZE)
		__dma = gen_pool_alloc_algo(ndtest_pool, size,
					    gen_pool_first_fit_align, &data);
	else
		__dma = (unsigned long) buf;

	if (!__dma)
		goto buf_err;

	INIT_LIST_HEAD(&res->list);
	res->dev = &p->pdev.dev;
	res->buf = buf;
	res->res.start = __dma;
	res->res.end = __dma + size - 1;
	res->res.name = "NFIT";
	spin_lock_init(&res->lock);
	INIT_LIST_HEAD(&res->requests);
	spin_lock(&ndtest_lock);
	list_add(&res->list, &p->resources);
	spin_unlock(&ndtest_lock);

	if (dma)
		*dma = __dma;

	if (!devm_add_action(&p->pdev.dev, ndtest_release_resource, res))
		return buf;

buf_err:
	if (__dma && size >= DIMM_SIZE)
		gen_pool_free(ndtest_pool, __dma, size);
	vfree(buf);
	kfree(res);

	return NULL;
}
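
/*
 * Allocation policy used above: buffers of at least DIMM_SIZE are carved out
 * of the shared gen_pool so they get a distinct fake physical address, while
 * smaller buffers (label areas and the like) simply reuse their vmalloc
 * address as the resource start.  Either way the resource is tracked on the
 * per-bus list and torn down via the devm action.
 */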
static ssize_t range_index_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct ndtest_region *region = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", region->range_index);
}
static DEVICE_ATTR_RO(range_index);
static struct attribute *ndtest_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group ndtest_region_attribute_group = {
	.attrs = ndtest_region_attributes,
};

static const struct attribute_group *ndtest_region_attribute_groups[] = {
	&ndtest_region_attribute_group,
	NULL,
};
static int ndtest_create_region(struct ndtest_priv *p,
				struct ndtest_region *region)
{
	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nd_interleave_set *nd_set;
	struct resource res;
	int i, ndimm = region->mapping[0].dimm;
	u64 uuid[2];

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	ndr_desc = &_ndr_desc;

	if (!ndtest_alloc_resource(p, region->size, &res.start))
		return -ENOMEM;

	res.end = res.start + region->size - 1;
	ndr_desc->mapping = mappings;
	ndr_desc->res = &res;
	ndr_desc->provider_data = region;
	ndr_desc->attr_groups = ndtest_region_attribute_groups;

	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
		pr_err("failed to parse UUID\n");
		return -ENXIO;
	}

	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	nd_set->cookie1 = cpu_to_le64(uuid[0]);
	nd_set->cookie2 = cpu_to_le64(uuid[1]);
	nd_set->altcookie = nd_set->cookie1;
	ndr_desc->nd_set = nd_set;

	for (i = 0; i < region->num_mappings; i++) {
		ndimm = region->mapping[i].dimm;
		mappings[i].start = region->mapping[i].start;
		mappings[i].size = region->mapping[i].size;
		mappings[i].position = region->mapping[i].position;
		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
	}

	ndr_desc->num_mappings = region->num_mappings;
	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
	if (!region->region) {
		dev_err(&p->pdev.dev, "Error registering region %pR\n",
			ndr_desc->res);
		return -ENXIO;
	}

	return 0;
}
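
/*
 * In ndtest_create_region() above, the interleave-set cookies are simply the
 * two 64-bit halves of the first mapping's DIMM UUID, with altcookie
 * mirroring cookie1, which gives each test region a stable cookie derived
 * from its first DIMM.
 */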
static int ndtest_init_regions(struct ndtest_priv *p)
{
	int ret = 0;
	int i;

	for (i = 0; i < p->config->num_regions; i++) {
		ret = ndtest_create_region(p, &p->config->regions[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static void put_dimms(void *data)
{
	struct ndtest_priv *p = data;
	int i;

	for (i = 0; i < p->config->dimm_count; i++)
		if (p->config->dimms[i].dev) {
			device_unregister(p->config->dimms[i].dev);
			p->config->dimms[i].dev = NULL;
		}
}
static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->handle);
}
static DEVICE_ATTR_RO(handle);
static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%#x\n", dimm->fail_cmd);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd);
static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dimm->fail_cmd_code);
}

static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct ndtest_dimm *dimm = dev_get_drvdata(dev);
	long val;
	ssize_t rc;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	dimm->fail_cmd_code = val;

	return size;
}
static DEVICE_ATTR_RW(fail_cmd_code);
static struct attribute *dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	NULL,
};

static struct attribute_group dimm_attribute_group = {
	.attrs = dimm_attributes,
};

static const struct attribute_group *dimm_attribute_groups[] = {
	&dimm_attribute_group,
	NULL,
};
static ssize_t phys_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->physical_id);
}
static DEVICE_ATTR_RO(phys_id);
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x1234567\n");
}
static DEVICE_ATTR_RO(vendor);
static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%04x-%02x-%04x-%08x", 0xabcd,
		       0xa, 2016, ~(dimm->handle));
}
static DEVICE_ATTR_RO(id);
static ssize_t nvdimm_handle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%#x\n", dimm->handle);
}

static struct device_attribute dev_attr_nvdimm_show_handle = {
	.attr = { .name = "handle", .mode = 0444 },
	.show = nvdimm_handle_show,
};
static ssize_t subsystem_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%04x\n", 0);
}
static DEVICE_ATTR_RO(subsystem_vendor);
static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(dirty_shutdown);
static ssize_t formats_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", dimm->num_formats);
}
static DEVICE_ATTR_RO(formats);
static ssize_t format_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (dimm->num_formats > 1)
		return sprintf(buf, "0x201\n");

	return sprintf(buf, "0x101\n");
}
static DEVICE_ATTR_RO(format);
static ssize_t format1_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x301\n");
}
static DEVICE_ATTR_RO(format1);
static umode_t ndtest_nvdimm_attr_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);

	if (a == &dev_attr_format1.attr && dimm->num_formats <= 1)
		return 0;

	return a->mode;
}
static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct ndtest_dimm *dimm = nvdimm_provider_data(nvdimm);
	struct seq_buf s;
	u64 flags;

	flags = dimm->flags;
	seq_buf_init(&s, buf, PAGE_SIZE);

	if (flags & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (flags & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (flags & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (flags & PAPR_PMEM_SAVE_MASK)
		seq_buf_printf(&s, "save_fail ");

	if (flags & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
static DEVICE_ATTR_RO(flags);
static struct attribute *ndtest_nvdimm_attributes[] = {
	&dev_attr_nvdimm_show_handle.attr,
	&dev_attr_vendor.attr,
	&dev_attr_id.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_formats.attr,
	&dev_attr_format.attr,
	&dev_attr_format1.attr,
	&dev_attr_flags.attr,
	NULL,
};

static const struct attribute_group ndtest_nvdimm_attribute_group = {
	.attrs = ndtest_nvdimm_attributes,
	.is_visible = ndtest_nvdimm_attr_visible,
};

static const struct attribute_group *ndtest_nvdimm_attribute_groups[] = {
	&ndtest_nvdimm_attribute_group,
	NULL,
};
static int ndtest_dimm_register(struct ndtest_priv *priv,
				struct ndtest_dimm *dimm, int id)
{
	struct device *dev = &priv->pdev.dev;
	unsigned long dimm_flags = dimm->flags;

	if (dimm->num_formats > 1)
		set_bit(NDD_LABELING, &dimm_flags);

	if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	dimm->nvdimm = nvdimm_create(priv->bus, dimm,
				     ndtest_nvdimm_attribute_groups, dimm_flags,
				     NDTEST_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!dimm->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", priv->dn);
		return -ENXIO;
	}

	dimm->dev = device_create_with_groups(&ndtest_dimm_class,
					      &priv->pdev.dev,
					      0, dimm, dimm_attribute_groups,
					      "test_dimm%d", id);
	if (!dimm->dev) {
		pr_err("Could not create dimm device attributes\n");
		return -ENOMEM;
	}

	return 0;
}
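
/*
 * Each test DIMM is therefore visible twice: as an nvdimm on the libnvdimm
 * bus (serving the config-data commands via ndtest_ctl()) and as a device in
 * the nfit_test_dimm class, which carries the handle/fail_cmd/fail_cmd_code
 * attributes used for failure injection.
 */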
static int ndtest_nvdimm_init(struct ndtest_priv *p)
{
	struct ndtest_dimm *d;
	void *res;
	int i, id;

	for (i = 0; i < p->config->dimm_count; i++) {
		d = &p->config->dimms[i];
		d->id = id = p->config->dimm_start + i;
		res = ndtest_alloc_resource(p, LABEL_SIZE, NULL);
		if (!res)
			return -ENOMEM;

		d->label_area = res;
		sprintf(d->label_area, "label%d", id);
		d->config_size = LABEL_SIZE;

		if (!ndtest_alloc_resource(p, d->size,
					   &p->dimm_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->label_dma[id]))
			return -ENOMEM;

		if (!ndtest_alloc_resource(p, LABEL_SIZE,
					   &p->dcr_dma[id]))
			return -ENOMEM;

		d->address = p->dimm_dma[id];

		ndtest_dimm_register(p, d, id);
	}

	return 0;
}
static ssize_t compatible_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "nvdimm_test");
}
static DEVICE_ATTR_RO(compatible);

static struct attribute *of_node_attributes[] = {
	&dev_attr_compatible.attr,
	NULL
};

static const struct attribute_group of_node_attribute_group = {
	.attrs = of_node_attributes,
};

static const struct attribute_group *ndtest_attribute_groups[] = {
	&of_node_attribute_group,
	NULL,
};
static int ndtest_bus_register(struct ndtest_priv *p)
{
	p->config = &bus_configs[p->pdev.id];

	p->bus_desc.ndctl = ndtest_ctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.provider_name = NULL;
	p->bus_desc.attr_groups = ndtest_attribute_groups;

	p->bus = nvdimm_bus_register(&p->pdev.dev, &p->bus_desc);
	if (!p->bus) {
		dev_err(&p->pdev.dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENOMEM;
	}

	return 0;
}
static void ndtest_remove(struct platform_device *pdev)
{
	struct ndtest_priv *p = to_ndtest_priv(&pdev->dev);

	nvdimm_bus_unregister(p->bus);
}
static int ndtest_probe(struct platform_device *pdev)
{
	struct ndtest_priv *p;
	int rc;

	p = to_ndtest_priv(&pdev->dev);
	if (ndtest_bus_register(p))
		return -ENOMEM;

	p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				  sizeof(dma_addr_t), GFP_KERNEL);
	p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				    sizeof(dma_addr_t), GFP_KERNEL);
	p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
				   sizeof(dma_addr_t), GFP_KERNEL);

	rc = ndtest_nvdimm_init(p);
	if (rc)
		goto err;

	rc = ndtest_init_regions(p);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
	if (rc)
		goto err;

	platform_set_drvdata(pdev, p);

	return 0;

err:
	pr_err("%s:%d Failed nvdimm init\n", __func__, __LINE__);
	return rc;
}
static const struct platform_device_id ndtest_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver ndtest_driver = {
	.probe = ndtest_probe,
	.remove = ndtest_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = ndtest_id,
};
static void ndtest_release(struct device *dev)
{
	struct ndtest_priv *p = to_ndtest_priv(dev);

	kfree(p);
}
static void cleanup_devices(void)
{
	int i;

	for (i = 0; i < NUM_INSTANCES; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);

	nfit_test_teardown();

	if (ndtest_pool)
		gen_pool_destroy(ndtest_pool);

	class_unregister(&ndtest_dimm_class);
}
static __init int ndtest_init(void)
{
	int rc, i;

	nfit_test_setup(ndtest_resource_lookup, NULL);

	rc = class_register(&ndtest_dimm_class);
	if (rc)
		goto err_register;

	ndtest_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!ndtest_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(ndtest_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	/* Each instance can be taken as a bus, which can have multiple dimms */
	for (i = 0; i < NUM_INSTANCES; i++) {
		struct ndtest_priv *priv;
		struct platform_device *pdev;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			rc = -ENOMEM;
			goto err_register;
		}

		INIT_LIST_HEAD(&priv->resources);
		pdev = &priv->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = ndtest_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		instances[i] = priv;
	}

	rc = platform_driver_register(&ndtest_driver);
	if (rc)
		goto err_register;

	return 0;

err_register:
	pr_err("Error registering platform device\n");
	cleanup_devices();

	return rc;
}
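
/*
 * Module init, in short: hook the address lookup into nfit_test, register the
 * dimm class and the shared gen_pool backing the fake DIMM memory, create one
 * platform device per bus instance, then register the driver that binds to
 * them; any failure unwinds through cleanup_devices().
 */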
static __exit void ndtest_exit(void)
{
	cleanup_devices();
	platform_driver_unregister(&ndtest_driver);
}

module_init(ndtest_init);
module_exit(ndtest_exit);
MODULE_DESCRIPTION("Test non-NFIT devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");