// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @nvdimm: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
			len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

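/*
 * Illustrative sketch (not part of the original file): a DIMM driver's
 * probe path would typically size the config area once and then read the
 * whole label space through the helper above, roughly:
 *
 *	rc = nvdimm_init_nsarea(ndd);
 *	if (rc == 0 && ndd->nsarea.config_size) {
 *		data = kvzalloc(ndd->nsarea.config_size, GFP_KERNEL);
 *		if (data)
 *			rc = nvdimm_get_config_data(ndd, data, 0,
 *					ndd->nsarea.config_size);
 *	}
 *
 * The "data" buffer name is hypothetical; the chunking against
 * nsarea.max_xfer happens inside nvdimm_get_config_data() itself.
 */
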
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

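/*
 * Illustrative note (not from the original source): a caller that wants
 * to persist an updated slice of the label area would use the helper
 * above as, e.g.:
 *
 *	rc = nvdimm_set_config_data(ndd, offset, buf, len);
 *
 * with "offset" and "len" constrained to nsarea.config_size; the loop
 * above transparently splits the write into nsarea.max_xfer chunks.
 */
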
void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

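/*
 * Illustrative example (not from the original source): reading this
 * attribute, e.g. "cat /sys/bus/nd/devices/nmem0/commands", prints the
 * space-separated names of the commands the bus provider enabled for
 * this DIMM, for instance "get_config_size get_config_data
 * set_config_data". "nmem0" is a placeholder device name.
 */
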
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

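/*
 * Illustrative example (not from the original source): userspace drives
 * this attribute with keyword commands that are parsed by
 * nvdimm_security_store(), for instance:
 *
 *	echo freeze > /sys/bus/nd/devices/nmem0/security
 *
 * The exact command vocabulary (freeze, disable, update, erase,
 * overwrite, ...) is defined by the security code, not by this file.
 */
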
static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

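/*
 * Illustrative example (not from the original source): with a bus and
 * DIMM that support firmware activation, an administrator would arm a
 * staged image and later check the outcome roughly like so:
 *
 *	echo arm > /sys/bus/nd/devices/nmem0/firmware/activate
 *	cat /sys/bus/nd/devices/nmem0/firmware/result
 *
 * "nmem0" is a placeholder device name; the attributes only appear when
 * nvdimm_firmware_visible() below deems the capability present.
 */
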
static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

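/*
 * Illustrative sketch (not from the original source): bus providers
 * without security or firmware-activation support typically reach this
 * function through a convenience wrapper along these lines:
 *
 *	static inline struct nvdimm *nvdimm_create(...)
 *	{
 *		return __nvdimm_create(nvdimm_bus, provider_data, groups,
 *				flags, cmd_mask, num_flush, flush_wpq,
 *				NULL, NULL, NULL);
 *	}
 *
 * passing NULL for dimm_id, sec_ops, and fw_ops.
 */
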
static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

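/*
 * Worked example (illustrative, not from the original source): for a
 * 4-way interleaved region with nd_region->align = 16M, each DIMM must
 * contribute 16M / 4 = 4M of aligned capacity, so dpa_align() returns 4M
 * and the per-DIMM allocations below are trimmed to 4M boundaries.
 */
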
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (start > end)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

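/*
 * Worked example (illustrative, not from the original source): with a
 * mapping spanning DPA 0..1G-1, an incoming *overlap of 0, a single
 * "blk" allocation starting at 768M, and no PMEM allocations, blk_start
 * becomes 768M, *overlap is reported as 256M, and 768M of aligned PMEM
 * capacity is returned as available.
 */
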
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @nvdimm: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}