/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

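/*
 * Illustrative sketch (not code from this file): the nstype value is what
 * namespace drivers match against.  Assuming the ND_DEVICE_MODALIAS_FMT
 * ("nd:t%d") convention from <linux/ndctl.h>, a namespace device's uevent
 * would be populated along the lines of:
 *
 *	int nstype = nd_region_to_nstype(nd_region);
 *
 *	add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT, nstype);
 */
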
static int is_uuid_busy(struct device *dev, void *data)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = data;

	switch (nd_region_to_nstype(nd_region)) {
	case ND_DEVICE_NAMESPACE_PMEM: {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		if (!nspm->uuid)
			break;
		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	case ND_DEVICE_NAMESPACE_BLK: {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		if (!nsblk->uuid)
			break;
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	default:
		break;
	}

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

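/*
 * Example read from userspace (hypothetical value): the attribute returns
 * a point-in-time snapshot, so two back-to-back reads may differ while
 * namespaces are being reconfigured:
 *
 *	# cat /sys/bus/nd/devices/region0/available_size
 *	17179869184
 */
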
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

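/*
 * Example output for a region's mapping0 attribute (hypothetical dimm
 * name and sizes), per the "%s,%llu,%llu" format above:
 *
 *	# cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,0,17179869184
 */
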
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

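/*
 * For reference, REGION_MAPPING(0) expands to (roughly):
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0);
 */
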
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

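/*
 * Illustrative lane usage (a sketch of an assumed BLK/BTT I/O path, not
 * code from this file): each data access is bracketed by an
 * acquire/release pair so that at most one context owns a lane at a time:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	...access the BLK data window / BTT log slot for @lane...
 *
 *	nd_region_release_lane(nd_region, lane);
 */
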
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->disable = ndbr_desc->disable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);