// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);
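
/*
 * Example (values are illustrative, not required defaults): the parameters
 * above can be set at load time or, where the permission bits allow, at
 * runtime through sysfs:
 *
 *	modprobe nfit default_dsm_family=1 disable_vendor_specific=1
 *	echo 1 > /sys/module/nfit/parameters/force_enable_dimms
 *
 * The 0444/0644/S_IRUGO permissions passed to module_param() decide whether
 * the corresponding /sys/module/nfit/parameters/ entry is read-only or
 * writable after the module is loaded.
 */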
static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];
const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -ENXIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}
#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}
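
/*
 * Note on the status word consumed by the translators above: the low 16 bits
 * carry the DSM command status and the upper 16 bits carry extended,
 * command-specific status. A minimal caller-side sketch (illustrative only):
 *
 *	if (status & 0xffff)
 *		;	// command-level failure, mapped to -EIO and friends
 *	else if (status >> 16)
 *		;	// extended status, meaning depends on the command
 */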
/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}
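
/*
 * Illustration (layout inferred from the conversion above, not from any new
 * behavior): a typical _LSR reply of
 *
 *	Package () { Status (Integer), Label Data (Buffer) }
 *
 * is flattened by pkg_to_buf() into a single ACPI_TYPE_BUFFER whose first
 * four bytes hold the status integer followed by the raw label bytes, which
 * is the shape acpi_nfit_ctl() copies back for ND_CMD_GET_CONFIG_DATA.
 */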
static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}
static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}
static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
			[NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
			[NVDIMM_INTEL_SET_PASSPHRASE] = 2,
			[NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
			[NVDIMM_INTEL_UNLOCK_UNIT] = 2,
			[NVDIMM_INTEL_FREEZE_LOCK] = 2,
			[NVDIMM_INTEL_SECURE_ERASE] = 2,
			[NVDIMM_INTEL_OVERWRITE] = 2,
			[NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
			[NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
			[NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > NVDIMM_CMD_MAX)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}
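
/*
 * Example lookup (values taken from revid_table above): for an Intel-family
 * DIMM, nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_SECURITY_STATE)
 * returns 2, so the security DSMs are evaluated at revision id 2, while any
 * family/function pair not listed in the table falls back to revision id 1.
 */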
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;

		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}
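
/*
 * Sketch of the ND_CMD_CALL envelope that cmd_to_func() unwraps (struct
 * nd_cmd_pkg comes from include/uapi/linux/ndctl.h; the values below are
 * hypothetical):
 *
 *	struct nd_cmd_pkg pkg = {
 *		.nd_family = NVDIMM_FAMILY_INTEL,
 *		.nd_command = NVDIMM_INTEL_GET_MODES,
 *		.nd_size_in = 0,
 *		.nd_size_out = 128,
 *	};
 *
 * The nd_reserved2[] fields must be zero and nd_family must match the family
 * probed for the target DIMM, otherwise the translation above rejects the
 * call with -ENOTTY or -EINVAL.
 */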
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command. For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
		dimm_name, cmd, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
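
/*
 * Typical call path (summary, no new behavior): libnvdimm invokes
 * acpi_nfit_ctl() as the bus ->ndctl() callback when servicing ioctls on
 * /dev/ndctlN and /dev/nmemN. A hypothetical in-kernel caller would look
 * like:
 *
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
 *			buf, buf_len, &cmd_rc);
 *	if (rc >= 0 && cmd_rc == 0)
 *		;	// buf now holds the translated output payload
 *
 * Here rc is the transport status (the number of bytes left unfilled on
 * success) and cmd_rc is the xlat_status() translation of the firmware
 * status word.
 */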
648 static const char *spa_type_name(u16 type
)
650 static const char *to_name
[] = {
651 [NFIT_SPA_VOLATILE
] = "volatile",
652 [NFIT_SPA_PM
] = "pmem",
653 [NFIT_SPA_DCR
] = "dimm-control-region",
654 [NFIT_SPA_BDW
] = "block-data-window",
655 [NFIT_SPA_VDISK
] = "volatile-disk",
656 [NFIT_SPA_VCD
] = "volatile-cd",
657 [NFIT_SPA_PDISK
] = "persistent-disk",
658 [NFIT_SPA_PCD
] = "persistent-cd",
662 if (type
> NFIT_SPA_PCD
)
665 return to_name
[type
];
668 int nfit_spa_type(struct acpi_nfit_system_address
*spa
)
672 for (i
= 0; i
< NFIT_UUID_MAX
; i
++)
673 if (guid_equal(to_nfit_uuid(i
), (guid_t
*)&spa
->range_guid
))
678 static bool add_spa(struct acpi_nfit_desc
*acpi_desc
,
679 struct nfit_table_prev
*prev
,
680 struct acpi_nfit_system_address
*spa
)
682 struct device
*dev
= acpi_desc
->dev
;
683 struct nfit_spa
*nfit_spa
;
685 if (spa
->header
.length
!= sizeof(*spa
))
688 list_for_each_entry(nfit_spa
, &prev
->spas
, list
) {
689 if (memcmp(nfit_spa
->spa
, spa
, sizeof(*spa
)) == 0) {
690 list_move_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
695 nfit_spa
= devm_kzalloc(dev
, sizeof(*nfit_spa
) + sizeof(*spa
),
699 INIT_LIST_HEAD(&nfit_spa
->list
);
700 memcpy(nfit_spa
->spa
, spa
, sizeof(*spa
));
701 list_add_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
702 dev_dbg(dev
, "spa index: %d type: %s\n",
704 spa_type_name(nfit_spa_type(spa
)));
708 static bool add_memdev(struct acpi_nfit_desc
*acpi_desc
,
709 struct nfit_table_prev
*prev
,
710 struct acpi_nfit_memory_map
*memdev
)
712 struct device
*dev
= acpi_desc
->dev
;
713 struct nfit_memdev
*nfit_memdev
;
715 if (memdev
->header
.length
!= sizeof(*memdev
))
718 list_for_each_entry(nfit_memdev
, &prev
->memdevs
, list
)
719 if (memcmp(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
)) == 0) {
720 list_move_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
724 nfit_memdev
= devm_kzalloc(dev
, sizeof(*nfit_memdev
) + sizeof(*memdev
),
728 INIT_LIST_HEAD(&nfit_memdev
->list
);
729 memcpy(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
));
730 list_add_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
731 dev_dbg(dev
, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
732 memdev
->device_handle
, memdev
->range_index
,
733 memdev
->region_index
, memdev
->flags
);
737 int nfit_get_smbios_id(u32 device_handle
, u16
*flags
)
739 struct acpi_nfit_memory_map
*memdev
;
740 struct acpi_nfit_desc
*acpi_desc
;
741 struct nfit_mem
*nfit_mem
;
744 mutex_lock(&acpi_desc_lock
);
745 list_for_each_entry(acpi_desc
, &acpi_descs
, list
) {
746 mutex_lock(&acpi_desc
->init_mutex
);
747 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
748 memdev
= __to_nfit_memdev(nfit_mem
);
749 if (memdev
->device_handle
== device_handle
) {
750 *flags
= memdev
->flags
;
751 physical_id
= memdev
->physical_id
;
752 mutex_unlock(&acpi_desc
->init_mutex
);
753 mutex_unlock(&acpi_desc_lock
);
757 mutex_unlock(&acpi_desc
->init_mutex
);
759 mutex_unlock(&acpi_desc_lock
);
763 EXPORT_SYMBOL_GPL(nfit_get_smbios_id
);
766 * An implementation may provide a truncated control region if no block windows
769 static size_t sizeof_dcr(struct acpi_nfit_control_region
*dcr
)
771 if (dcr
->header
.length
< offsetof(struct acpi_nfit_control_region
,
776 return offsetof(struct acpi_nfit_control_region
, window_size
);
779 static bool add_dcr(struct acpi_nfit_desc
*acpi_desc
,
780 struct nfit_table_prev
*prev
,
781 struct acpi_nfit_control_region
*dcr
)
783 struct device
*dev
= acpi_desc
->dev
;
784 struct nfit_dcr
*nfit_dcr
;
786 if (!sizeof_dcr(dcr
))
789 list_for_each_entry(nfit_dcr
, &prev
->dcrs
, list
)
790 if (memcmp(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
)) == 0) {
791 list_move_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
795 nfit_dcr
= devm_kzalloc(dev
, sizeof(*nfit_dcr
) + sizeof(*dcr
),
799 INIT_LIST_HEAD(&nfit_dcr
->list
);
800 memcpy(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
));
801 list_add_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
802 dev_dbg(dev
, "dcr index: %d windows: %d\n",
803 dcr
->region_index
, dcr
->windows
);
807 static bool add_bdw(struct acpi_nfit_desc
*acpi_desc
,
808 struct nfit_table_prev
*prev
,
809 struct acpi_nfit_data_region
*bdw
)
811 struct device
*dev
= acpi_desc
->dev
;
812 struct nfit_bdw
*nfit_bdw
;
814 if (bdw
->header
.length
!= sizeof(*bdw
))
816 list_for_each_entry(nfit_bdw
, &prev
->bdws
, list
)
817 if (memcmp(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
)) == 0) {
818 list_move_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
822 nfit_bdw
= devm_kzalloc(dev
, sizeof(*nfit_bdw
) + sizeof(*bdw
),
826 INIT_LIST_HEAD(&nfit_bdw
->list
);
827 memcpy(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
));
828 list_add_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
829 dev_dbg(dev
, "bdw dcr: %d windows: %d\n",
830 bdw
->region_index
, bdw
->windows
);
834 static size_t sizeof_idt(struct acpi_nfit_interleave
*idt
)
836 if (idt
->header
.length
< sizeof(*idt
))
838 return sizeof(*idt
) + sizeof(u32
) * (idt
->line_count
- 1);
841 static bool add_idt(struct acpi_nfit_desc
*acpi_desc
,
842 struct nfit_table_prev
*prev
,
843 struct acpi_nfit_interleave
*idt
)
845 struct device
*dev
= acpi_desc
->dev
;
846 struct nfit_idt
*nfit_idt
;
848 if (!sizeof_idt(idt
))
851 list_for_each_entry(nfit_idt
, &prev
->idts
, list
) {
852 if (sizeof_idt(nfit_idt
->idt
) != sizeof_idt(idt
))
855 if (memcmp(nfit_idt
->idt
, idt
, sizeof_idt(idt
)) == 0) {
856 list_move_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
861 nfit_idt
= devm_kzalloc(dev
, sizeof(*nfit_idt
) + sizeof_idt(idt
),
865 INIT_LIST_HEAD(&nfit_idt
->list
);
866 memcpy(nfit_idt
->idt
, idt
, sizeof_idt(idt
));
867 list_add_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
868 dev_dbg(dev
, "idt index: %d num_lines: %d\n",
869 idt
->interleave_index
, idt
->line_count
);
873 static size_t sizeof_flush(struct acpi_nfit_flush_address
*flush
)
875 if (flush
->header
.length
< sizeof(*flush
))
877 return sizeof(*flush
) + sizeof(u64
) * (flush
->hint_count
- 1);
880 static bool add_flush(struct acpi_nfit_desc
*acpi_desc
,
881 struct nfit_table_prev
*prev
,
882 struct acpi_nfit_flush_address
*flush
)
884 struct device
*dev
= acpi_desc
->dev
;
885 struct nfit_flush
*nfit_flush
;
887 if (!sizeof_flush(flush
))
890 list_for_each_entry(nfit_flush
, &prev
->flushes
, list
) {
891 if (sizeof_flush(nfit_flush
->flush
) != sizeof_flush(flush
))
894 if (memcmp(nfit_flush
->flush
, flush
,
895 sizeof_flush(flush
)) == 0) {
896 list_move_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
901 nfit_flush
= devm_kzalloc(dev
, sizeof(*nfit_flush
)
902 + sizeof_flush(flush
), GFP_KERNEL
);
905 INIT_LIST_HEAD(&nfit_flush
->list
);
906 memcpy(nfit_flush
->flush
, flush
, sizeof_flush(flush
));
907 list_add_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
908 dev_dbg(dev
, "nfit_flush handle: %d hint_count: %d\n",
909 flush
->device_handle
, flush
->hint_count
);
913 static bool add_platform_cap(struct acpi_nfit_desc
*acpi_desc
,
914 struct acpi_nfit_capabilities
*pcap
)
916 struct device
*dev
= acpi_desc
->dev
;
919 mask
= (1 << (pcap
->highest_capability
+ 1)) - 1;
920 acpi_desc
->platform_cap
= pcap
->capabilities
& mask
;
921 dev_dbg(dev
, "cap: %#x\n", acpi_desc
->platform_cap
);
925 static void *add_table(struct acpi_nfit_desc
*acpi_desc
,
926 struct nfit_table_prev
*prev
, void *table
, const void *end
)
928 struct device
*dev
= acpi_desc
->dev
;
929 struct acpi_nfit_header
*hdr
;
930 void *err
= ERR_PTR(-ENOMEM
);
937 dev_warn(dev
, "found a zero length table '%d' parsing nfit\n",
943 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS
:
944 if (!add_spa(acpi_desc
, prev
, table
))
947 case ACPI_NFIT_TYPE_MEMORY_MAP
:
948 if (!add_memdev(acpi_desc
, prev
, table
))
951 case ACPI_NFIT_TYPE_CONTROL_REGION
:
952 if (!add_dcr(acpi_desc
, prev
, table
))
955 case ACPI_NFIT_TYPE_DATA_REGION
:
956 if (!add_bdw(acpi_desc
, prev
, table
))
959 case ACPI_NFIT_TYPE_INTERLEAVE
:
960 if (!add_idt(acpi_desc
, prev
, table
))
963 case ACPI_NFIT_TYPE_FLUSH_ADDRESS
:
964 if (!add_flush(acpi_desc
, prev
, table
))
967 case ACPI_NFIT_TYPE_SMBIOS
:
968 dev_dbg(dev
, "smbios\n");
970 case ACPI_NFIT_TYPE_CAPABILITIES
:
971 if (!add_platform_cap(acpi_desc
, table
))
975 dev_err(dev
, "unknown table '%d' parsing nfit\n", hdr
->type
);
979 return table
+ hdr
->length
;
982 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc
*acpi_desc
,
983 struct nfit_mem
*nfit_mem
)
985 u32 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
986 u16 dcr
= nfit_mem
->dcr
->region_index
;
987 struct nfit_spa
*nfit_spa
;
989 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
990 u16 range_index
= nfit_spa
->spa
->range_index
;
991 int type
= nfit_spa_type(nfit_spa
->spa
);
992 struct nfit_memdev
*nfit_memdev
;
994 if (type
!= NFIT_SPA_BDW
)
997 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
998 if (nfit_memdev
->memdev
->range_index
!= range_index
)
1000 if (nfit_memdev
->memdev
->device_handle
!= device_handle
)
1002 if (nfit_memdev
->memdev
->region_index
!= dcr
)
1005 nfit_mem
->spa_bdw
= nfit_spa
->spa
;
1010 dev_dbg(acpi_desc
->dev
, "SPA-BDW not found for SPA-DCR %d\n",
1011 nfit_mem
->spa_dcr
->range_index
);
1012 nfit_mem
->bdw
= NULL
;
1015 static void nfit_mem_init_bdw(struct acpi_nfit_desc
*acpi_desc
,
1016 struct nfit_mem
*nfit_mem
, struct acpi_nfit_system_address
*spa
)
1018 u16 dcr
= __to_nfit_memdev(nfit_mem
)->region_index
;
1019 struct nfit_memdev
*nfit_memdev
;
1020 struct nfit_bdw
*nfit_bdw
;
1021 struct nfit_idt
*nfit_idt
;
1022 u16 idt_idx
, range_index
;
1024 list_for_each_entry(nfit_bdw
, &acpi_desc
->bdws
, list
) {
1025 if (nfit_bdw
->bdw
->region_index
!= dcr
)
1027 nfit_mem
->bdw
= nfit_bdw
->bdw
;
1034 nfit_mem_find_spa_bdw(acpi_desc
, nfit_mem
);
1036 if (!nfit_mem
->spa_bdw
)
1039 range_index
= nfit_mem
->spa_bdw
->range_index
;
1040 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1041 if (nfit_memdev
->memdev
->range_index
!= range_index
||
1042 nfit_memdev
->memdev
->region_index
!= dcr
)
1044 nfit_mem
->memdev_bdw
= nfit_memdev
->memdev
;
1045 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
1046 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
1047 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
1049 nfit_mem
->idt_bdw
= nfit_idt
->idt
;
1056 static int __nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
,
1057 struct acpi_nfit_system_address
*spa
)
1059 struct nfit_mem
*nfit_mem
, *found
;
1060 struct nfit_memdev
*nfit_memdev
;
1061 int type
= spa
? nfit_spa_type(spa
) : 0;
1073 * This loop runs in two modes, when a dimm is mapped the loop
1074 * adds memdev associations to an existing dimm, or creates a
1075 * dimm. In the unmapped dimm case this loop sweeps for memdev
1076 * instances with an invalid / zero range_index and adds those
1077 * dimms without spa associations.
1079 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1080 struct nfit_flush
*nfit_flush
;
1081 struct nfit_dcr
*nfit_dcr
;
1085 if (spa
&& nfit_memdev
->memdev
->range_index
!= spa
->range_index
)
1087 if (!spa
&& nfit_memdev
->memdev
->range_index
)
1090 dcr
= nfit_memdev
->memdev
->region_index
;
1091 device_handle
= nfit_memdev
->memdev
->device_handle
;
1092 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1093 if (__to_nfit_memdev(nfit_mem
)->device_handle
1102 nfit_mem
= devm_kzalloc(acpi_desc
->dev
,
1103 sizeof(*nfit_mem
), GFP_KERNEL
);
1106 INIT_LIST_HEAD(&nfit_mem
->list
);
1107 nfit_mem
->acpi_desc
= acpi_desc
;
1108 list_add(&nfit_mem
->list
, &acpi_desc
->dimms
);
1111 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1112 if (nfit_dcr
->dcr
->region_index
!= dcr
)
1115 * Record the control region for the dimm. For
1116 * the ACPI 6.1 case, where there are separate
1117 * control regions for the pmem vs blk
1118 * interfaces, be sure to record the extended
1122 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1123 else if (nfit_mem
->dcr
->windows
== 0
1124 && nfit_dcr
->dcr
->windows
)
1125 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1129 list_for_each_entry(nfit_flush
, &acpi_desc
->flushes
, list
) {
1130 struct acpi_nfit_flush_address
*flush
;
1133 if (nfit_flush
->flush
->device_handle
!= device_handle
)
1135 nfit_mem
->nfit_flush
= nfit_flush
;
1136 flush
= nfit_flush
->flush
;
1137 nfit_mem
->flush_wpq
= devm_kcalloc(acpi_desc
->dev
,
1139 sizeof(struct resource
),
1141 if (!nfit_mem
->flush_wpq
)
1143 for (i
= 0; i
< flush
->hint_count
; i
++) {
1144 struct resource
*res
= &nfit_mem
->flush_wpq
[i
];
1146 res
->start
= flush
->hint_address
[i
];
1147 res
->end
= res
->start
+ 8 - 1;
1152 if (dcr
&& !nfit_mem
->dcr
) {
1153 dev_err(acpi_desc
->dev
, "SPA %d missing DCR %d\n",
1154 spa
->range_index
, dcr
);
1158 if (type
== NFIT_SPA_DCR
) {
1159 struct nfit_idt
*nfit_idt
;
1162 /* multiple dimms may share a SPA when interleaved */
1163 nfit_mem
->spa_dcr
= spa
;
1164 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1165 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
1166 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
1167 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
1169 nfit_mem
->idt_dcr
= nfit_idt
->idt
;
1172 nfit_mem_init_bdw(acpi_desc
, nfit_mem
, spa
);
1173 } else if (type
== NFIT_SPA_PM
) {
1175 * A single dimm may belong to multiple SPA-PM
1176 * ranges, record at least one in addition to
1177 * any SPA-DCR range.
1179 nfit_mem
->memdev_pmem
= nfit_memdev
->memdev
;
1181 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1187 static int nfit_mem_cmp(void *priv
, struct list_head
*_a
, struct list_head
*_b
)
1189 struct nfit_mem
*a
= container_of(_a
, typeof(*a
), list
);
1190 struct nfit_mem
*b
= container_of(_b
, typeof(*b
), list
);
1191 u32 handleA
, handleB
;
1193 handleA
= __to_nfit_memdev(a
)->device_handle
;
1194 handleB
= __to_nfit_memdev(b
)->device_handle
;
1195 if (handleA
< handleB
)
1197 else if (handleA
> handleB
)
1202 static int nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
)
1204 struct nfit_spa
*nfit_spa
;
1209 * For each SPA-DCR or SPA-PMEM address range find its
1210 * corresponding MEMDEV(s). From each MEMDEV find the
1211 * corresponding DCR. Then, if we're operating on a SPA-DCR,
1212 * try to find a SPA-BDW and a corresponding BDW that references
1213 * the DCR. Throw it all into an nfit_mem object. Note, that
1214 * BDWs are optional.
1216 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
1217 rc
= __nfit_mem_init(acpi_desc
, nfit_spa
->spa
);
1223 * If a DIMM has failed to be mapped into SPA there will be no
1224 * SPA entries above. Find and register all the unmapped DIMMs
1225 * for reporting and recovery purposes.
1227 rc
= __nfit_mem_init(acpi_desc
, NULL
);
1231 list_sort(NULL
, &acpi_desc
->dimms
, nfit_mem_cmp
);
1236 static ssize_t
bus_dsm_mask_show(struct device
*dev
,
1237 struct device_attribute
*attr
, char *buf
)
1239 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1240 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1242 return sprintf(buf
, "%#lx\n", nd_desc
->bus_dsm_mask
);
1244 static struct device_attribute dev_attr_bus_dsm_mask
=
1245 __ATTR(dsm_mask
, 0444, bus_dsm_mask_show
, NULL
);
1247 static ssize_t
revision_show(struct device
*dev
,
1248 struct device_attribute
*attr
, char *buf
)
1250 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1251 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1252 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1254 return sprintf(buf
, "%d\n", acpi_desc
->acpi_header
.revision
);
1256 static DEVICE_ATTR_RO(revision
);
1258 static ssize_t
hw_error_scrub_show(struct device
*dev
,
1259 struct device_attribute
*attr
, char *buf
)
1261 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1262 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1263 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1265 return sprintf(buf
, "%d\n", acpi_desc
->scrub_mode
);
1269 * The 'hw_error_scrub' attribute can have the following values written to it:
1270 * '0': Switch to the default mode where an exception will only insert
1271 * the address of the memory error into the poison and badblocks lists.
1272 * '1': Enable a full scrub to happen if an exception for a memory error is
1275 static ssize_t
hw_error_scrub_store(struct device
*dev
,
1276 struct device_attribute
*attr
, const char *buf
, size_t size
)
1278 struct nvdimm_bus_descriptor
*nd_desc
;
1282 rc
= kstrtol(buf
, 0, &val
);
1286 nfit_device_lock(dev
);
1287 nd_desc
= dev_get_drvdata(dev
);
1289 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1292 case HW_ERROR_SCRUB_ON
:
1293 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_ON
;
1295 case HW_ERROR_SCRUB_OFF
:
1296 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_OFF
;
1303 nfit_device_unlock(dev
);
1308 static DEVICE_ATTR_RW(hw_error_scrub
);
1311 * This shows the number of full Address Range Scrubs that have been
1312 * completed since driver load time. Userspace can wait on this using
1313 * select/poll etc. A '+' at the end indicates an ARS is in progress
1315 static ssize_t
scrub_show(struct device
*dev
,
1316 struct device_attribute
*attr
, char *buf
)
1318 struct nvdimm_bus_descriptor
*nd_desc
;
1319 struct acpi_nfit_desc
*acpi_desc
;
1320 ssize_t rc
= -ENXIO
;
1323 nfit_device_lock(dev
);
1324 nd_desc
= dev_get_drvdata(dev
);
1326 nfit_device_unlock(dev
);
1329 acpi_desc
= to_acpi_desc(nd_desc
);
1331 mutex_lock(&acpi_desc
->init_mutex
);
1332 busy
= test_bit(ARS_BUSY
, &acpi_desc
->scrub_flags
)
1333 && !test_bit(ARS_CANCEL
, &acpi_desc
->scrub_flags
);
1334 rc
= sprintf(buf
, "%d%s", acpi_desc
->scrub_count
, busy
? "+\n" : "\n");
1335 /* Allow an admin to poll the busy state at a higher rate */
1336 if (busy
&& capable(CAP_SYS_RAWIO
) && !test_and_set_bit(ARS_POLL
,
1337 &acpi_desc
->scrub_flags
)) {
1338 acpi_desc
->scrub_tmo
= 1;
1339 mod_delayed_work(nfit_wq
, &acpi_desc
->dwork
, HZ
);
1342 mutex_unlock(&acpi_desc
->init_mutex
);
1343 nfit_device_unlock(dev
);
1347 static ssize_t
scrub_store(struct device
*dev
,
1348 struct device_attribute
*attr
, const char *buf
, size_t size
)
1350 struct nvdimm_bus_descriptor
*nd_desc
;
1354 rc
= kstrtol(buf
, 0, &val
);
1360 nfit_device_lock(dev
);
1361 nd_desc
= dev_get_drvdata(dev
);
1363 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1365 rc
= acpi_nfit_ars_rescan(acpi_desc
, ARS_REQ_LONG
);
1367 nfit_device_unlock(dev
);
1372 static DEVICE_ATTR_RW(scrub
);
1374 static bool ars_supported(struct nvdimm_bus
*nvdimm_bus
)
1376 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1377 const unsigned long mask
= 1 << ND_CMD_ARS_CAP
| 1 << ND_CMD_ARS_START
1378 | 1 << ND_CMD_ARS_STATUS
;
1380 return (nd_desc
->cmd_mask
& mask
) == mask
;
1383 static umode_t
nfit_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
1385 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1386 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1388 if (a
== &dev_attr_scrub
.attr
&& !ars_supported(nvdimm_bus
))
1393 static struct attribute
*acpi_nfit_attributes
[] = {
1394 &dev_attr_revision
.attr
,
1395 &dev_attr_scrub
.attr
,
1396 &dev_attr_hw_error_scrub
.attr
,
1397 &dev_attr_bus_dsm_mask
.attr
,
1401 static const struct attribute_group acpi_nfit_attribute_group
= {
1403 .attrs
= acpi_nfit_attributes
,
1404 .is_visible
= nfit_visible
,
1407 static const struct attribute_group
*acpi_nfit_attribute_groups
[] = {
1408 &acpi_nfit_attribute_group
,
1412 static struct acpi_nfit_memory_map
*to_nfit_memdev(struct device
*dev
)
1414 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1415 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1417 return __to_nfit_memdev(nfit_mem
);
1420 static struct acpi_nfit_control_region
*to_nfit_dcr(struct device
*dev
)
1422 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1423 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1425 return nfit_mem
->dcr
;
1428 static ssize_t
handle_show(struct device
*dev
,
1429 struct device_attribute
*attr
, char *buf
)
1431 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1433 return sprintf(buf
, "%#x\n", memdev
->device_handle
);
1435 static DEVICE_ATTR_RO(handle
);
1437 static ssize_t
phys_id_show(struct device
*dev
,
1438 struct device_attribute
*attr
, char *buf
)
1440 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1442 return sprintf(buf
, "%#x\n", memdev
->physical_id
);
1444 static DEVICE_ATTR_RO(phys_id
);
1446 static ssize_t
vendor_show(struct device
*dev
,
1447 struct device_attribute
*attr
, char *buf
)
1449 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1451 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->vendor_id
));
1453 static DEVICE_ATTR_RO(vendor
);
1455 static ssize_t
rev_id_show(struct device
*dev
,
1456 struct device_attribute
*attr
, char *buf
)
1458 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1460 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->revision_id
));
1462 static DEVICE_ATTR_RO(rev_id
);
1464 static ssize_t
device_show(struct device
*dev
,
1465 struct device_attribute
*attr
, char *buf
)
1467 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1469 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->device_id
));
1471 static DEVICE_ATTR_RO(device
);
1473 static ssize_t
subsystem_vendor_show(struct device
*dev
,
1474 struct device_attribute
*attr
, char *buf
)
1476 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1478 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_vendor_id
));
1480 static DEVICE_ATTR_RO(subsystem_vendor
);
1482 static ssize_t
subsystem_rev_id_show(struct device
*dev
,
1483 struct device_attribute
*attr
, char *buf
)
1485 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1487 return sprintf(buf
, "0x%04x\n",
1488 be16_to_cpu(dcr
->subsystem_revision_id
));
1490 static DEVICE_ATTR_RO(subsystem_rev_id
);
1492 static ssize_t
subsystem_device_show(struct device
*dev
,
1493 struct device_attribute
*attr
, char *buf
)
1495 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1497 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_device_id
));
1499 static DEVICE_ATTR_RO(subsystem_device
);
1501 static int num_nvdimm_formats(struct nvdimm
*nvdimm
)
1503 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1506 if (nfit_mem
->memdev_pmem
)
1508 if (nfit_mem
->memdev_bdw
)
1513 static ssize_t
format_show(struct device
*dev
,
1514 struct device_attribute
*attr
, char *buf
)
1516 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1518 return sprintf(buf
, "0x%04x\n", le16_to_cpu(dcr
->code
));
1520 static DEVICE_ATTR_RO(format
);
1522 static ssize_t
format1_show(struct device
*dev
,
1523 struct device_attribute
*attr
, char *buf
)
1526 ssize_t rc
= -ENXIO
;
1527 struct nfit_mem
*nfit_mem
;
1528 struct nfit_memdev
*nfit_memdev
;
1529 struct acpi_nfit_desc
*acpi_desc
;
1530 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1531 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1533 nfit_mem
= nvdimm_provider_data(nvdimm
);
1534 acpi_desc
= nfit_mem
->acpi_desc
;
1535 handle
= to_nfit_memdev(dev
)->device_handle
;
1537 /* assumes DIMMs have at most 2 published interface codes */
1538 mutex_lock(&acpi_desc
->init_mutex
);
1539 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1540 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
1541 struct nfit_dcr
*nfit_dcr
;
1543 if (memdev
->device_handle
!= handle
)
1546 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1547 if (nfit_dcr
->dcr
->region_index
!= memdev
->region_index
)
1549 if (nfit_dcr
->dcr
->code
== dcr
->code
)
1551 rc
= sprintf(buf
, "0x%04x\n",
1552 le16_to_cpu(nfit_dcr
->dcr
->code
));
1558 mutex_unlock(&acpi_desc
->init_mutex
);
1561 static DEVICE_ATTR_RO(format1
);
1563 static ssize_t
formats_show(struct device
*dev
,
1564 struct device_attribute
*attr
, char *buf
)
1566 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1568 return sprintf(buf
, "%d\n", num_nvdimm_formats(nvdimm
));
1570 static DEVICE_ATTR_RO(formats
);
1572 static ssize_t
serial_show(struct device
*dev
,
1573 struct device_attribute
*attr
, char *buf
)
1575 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1577 return sprintf(buf
, "0x%08x\n", be32_to_cpu(dcr
->serial_number
));
1579 static DEVICE_ATTR_RO(serial
);
1581 static ssize_t
family_show(struct device
*dev
,
1582 struct device_attribute
*attr
, char *buf
)
1584 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1585 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1587 if (nfit_mem
->family
< 0)
1589 return sprintf(buf
, "%d\n", nfit_mem
->family
);
1591 static DEVICE_ATTR_RO(family
);
1593 static ssize_t
dsm_mask_show(struct device
*dev
,
1594 struct device_attribute
*attr
, char *buf
)
1596 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1597 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1599 if (nfit_mem
->family
< 0)
1601 return sprintf(buf
, "%#lx\n", nfit_mem
->dsm_mask
);
1603 static DEVICE_ATTR_RO(dsm_mask
);
1605 static ssize_t
flags_show(struct device
*dev
,
1606 struct device_attribute
*attr
, char *buf
)
1608 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1609 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1610 u16 flags
= __to_nfit_memdev(nfit_mem
)->flags
;
1612 if (test_bit(NFIT_MEM_DIRTY
, &nfit_mem
->flags
))
1613 flags
|= ACPI_NFIT_MEM_FLUSH_FAILED
;
1615 return sprintf(buf
, "%s%s%s%s%s%s%s\n",
1616 flags
& ACPI_NFIT_MEM_SAVE_FAILED
? "save_fail " : "",
1617 flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? "restore_fail " : "",
1618 flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? "flush_fail " : "",
1619 flags
& ACPI_NFIT_MEM_NOT_ARMED
? "not_armed " : "",
1620 flags
& ACPI_NFIT_MEM_HEALTH_OBSERVED
? "smart_event " : "",
1621 flags
& ACPI_NFIT_MEM_MAP_FAILED
? "map_fail " : "",
1622 flags
& ACPI_NFIT_MEM_HEALTH_ENABLED
? "smart_notify " : "");
1624 static DEVICE_ATTR_RO(flags
);
1626 static ssize_t
id_show(struct device
*dev
,
1627 struct device_attribute
*attr
, char *buf
)
1629 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1630 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1632 return sprintf(buf
, "%s\n", nfit_mem
->id
);
1634 static DEVICE_ATTR_RO(id
);
1636 static ssize_t
dirty_shutdown_show(struct device
*dev
,
1637 struct device_attribute
*attr
, char *buf
)
1639 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1640 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1642 return sprintf(buf
, "%d\n", nfit_mem
->dirty_shutdown
);
1644 static DEVICE_ATTR_RO(dirty_shutdown
);
1646 static struct attribute
*acpi_nfit_dimm_attributes
[] = {
1647 &dev_attr_handle
.attr
,
1648 &dev_attr_phys_id
.attr
,
1649 &dev_attr_vendor
.attr
,
1650 &dev_attr_device
.attr
,
1651 &dev_attr_rev_id
.attr
,
1652 &dev_attr_subsystem_vendor
.attr
,
1653 &dev_attr_subsystem_device
.attr
,
1654 &dev_attr_subsystem_rev_id
.attr
,
1655 &dev_attr_format
.attr
,
1656 &dev_attr_formats
.attr
,
1657 &dev_attr_format1
.attr
,
1658 &dev_attr_serial
.attr
,
1659 &dev_attr_flags
.attr
,
1661 &dev_attr_family
.attr
,
1662 &dev_attr_dsm_mask
.attr
,
1663 &dev_attr_dirty_shutdown
.attr
,
1667 static umode_t
acpi_nfit_dimm_attr_visible(struct kobject
*kobj
,
1668 struct attribute
*a
, int n
)
1670 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1671 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1672 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1674 if (!to_nfit_dcr(dev
)) {
1675 /* Without a dcr only the memdev attributes can be surfaced */
1676 if (a
== &dev_attr_handle
.attr
|| a
== &dev_attr_phys_id
.attr
1677 || a
== &dev_attr_flags
.attr
1678 || a
== &dev_attr_family
.attr
1679 || a
== &dev_attr_dsm_mask
.attr
)
1684 if (a
== &dev_attr_format1
.attr
&& num_nvdimm_formats(nvdimm
) <= 1)
1687 if (!test_bit(NFIT_MEM_DIRTY_COUNT
, &nfit_mem
->flags
)
1688 && a
== &dev_attr_dirty_shutdown
.attr
)
1694 static const struct attribute_group acpi_nfit_dimm_attribute_group
= {
1696 .attrs
= acpi_nfit_dimm_attributes
,
1697 .is_visible
= acpi_nfit_dimm_attr_visible
,
1700 static const struct attribute_group
*acpi_nfit_dimm_attribute_groups
[] = {
1701 &acpi_nfit_dimm_attribute_group
,
1705 static struct nvdimm
*acpi_nfit_dimm_by_handle(struct acpi_nfit_desc
*acpi_desc
,
1708 struct nfit_mem
*nfit_mem
;
1710 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1711 if (__to_nfit_memdev(nfit_mem
)->device_handle
== device_handle
)
1712 return nfit_mem
->nvdimm
;
1717 void __acpi_nvdimm_notify(struct device
*dev
, u32 event
)
1719 struct nfit_mem
*nfit_mem
;
1720 struct acpi_nfit_desc
*acpi_desc
;
1722 dev_dbg(dev
->parent
, "%s: event: %d\n", dev_name(dev
),
1725 if (event
!= NFIT_NOTIFY_DIMM_HEALTH
) {
1726 dev_dbg(dev
->parent
, "%s: unknown event: %d\n", dev_name(dev
),
1731 acpi_desc
= dev_get_drvdata(dev
->parent
);
1736 * If we successfully retrieved acpi_desc, then we know nfit_mem data
1739 nfit_mem
= dev_get_drvdata(dev
);
1740 if (nfit_mem
&& nfit_mem
->flags_attr
)
1741 sysfs_notify_dirent(nfit_mem
->flags_attr
);
1743 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify
);
1745 static void acpi_nvdimm_notify(acpi_handle handle
, u32 event
, void *data
)
1747 struct acpi_device
*adev
= data
;
1748 struct device
*dev
= &adev
->dev
;
1750 nfit_device_lock(dev
->parent
);
1751 __acpi_nvdimm_notify(dev
, event
);
1752 nfit_device_unlock(dev
->parent
);
1755 static bool acpi_nvdimm_has_method(struct acpi_device
*adev
, char *method
)
1760 status
= acpi_get_handle(adev
->handle
, method
, &handle
);
1762 if (ACPI_SUCCESS(status
))
1767 __weak
void nfit_intel_shutdown_status(struct nfit_mem
*nfit_mem
)
1769 struct device
*dev
= &nfit_mem
->adev
->dev
;
1770 struct nd_intel_smart smart
= { 0 };
1771 union acpi_object in_buf
= {
1772 .buffer
.type
= ACPI_TYPE_BUFFER
,
1775 union acpi_object in_obj
= {
1776 .package
.type
= ACPI_TYPE_PACKAGE
,
1778 .package
.elements
= &in_buf
,
1780 const u8 func
= ND_INTEL_SMART
;
1781 const guid_t
*guid
= to_nfit_uuid(nfit_mem
->family
);
1782 u8 revid
= nfit_dsm_revid(nfit_mem
->family
, func
);
1783 struct acpi_device
*adev
= nfit_mem
->adev
;
1784 acpi_handle handle
= adev
->handle
;
1785 union acpi_object
*out_obj
;
1787 if ((nfit_mem
->dsm_mask
& (1 << func
)) == 0)
1790 out_obj
= acpi_evaluate_dsm(handle
, guid
, revid
, func
, &in_obj
);
1791 if (!out_obj
|| out_obj
->type
!= ACPI_TYPE_BUFFER
1792 || out_obj
->buffer
.length
< sizeof(smart
)) {
1793 dev_dbg(dev
->parent
, "%s: failed to retrieve initial health\n",
1798 memcpy(&smart
, out_obj
->buffer
.pointer
, sizeof(smart
));
1801 if (smart
.flags
& ND_INTEL_SMART_SHUTDOWN_VALID
) {
1802 if (smart
.shutdown_state
)
1803 set_bit(NFIT_MEM_DIRTY
, &nfit_mem
->flags
);
1806 if (smart
.flags
& ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
) {
1807 set_bit(NFIT_MEM_DIRTY_COUNT
, &nfit_mem
->flags
);
1808 nfit_mem
->dirty_shutdown
= smart
.shutdown_count
;
1812 static void populate_shutdown_status(struct nfit_mem
*nfit_mem
)
1815 * For DIMMs that provide a dynamic facility to retrieve a
1816 * dirty-shutdown status and/or a dirty-shutdown count, cache
1817 * these values in nfit_mem.
1819 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
)
1820 nfit_intel_shutdown_status(nfit_mem
);
1823 static int acpi_nfit_add_dimm(struct acpi_nfit_desc
*acpi_desc
,
1824 struct nfit_mem
*nfit_mem
, u32 device_handle
)
1826 struct acpi_device
*adev
, *adev_dimm
;
1827 struct device
*dev
= acpi_desc
->dev
;
1828 unsigned long dsm_mask
, label_mask
;
1832 struct acpi_nfit_control_region
*dcr
= nfit_mem
->dcr
;
1834 /* nfit test assumes 1:1 relationship between commands and dsms */
1835 nfit_mem
->dsm_mask
= acpi_desc
->dimm_cmd_force_en
;
1836 nfit_mem
->family
= NVDIMM_FAMILY_INTEL
;
1838 if (dcr
->valid_fields
& ACPI_NFIT_CONTROL_MFG_INFO_VALID
)
1839 sprintf(nfit_mem
->id
, "%04x-%02x-%04x-%08x",
1840 be16_to_cpu(dcr
->vendor_id
),
1841 dcr
->manufacturing_location
,
1842 be16_to_cpu(dcr
->manufacturing_date
),
1843 be32_to_cpu(dcr
->serial_number
));
1845 sprintf(nfit_mem
->id
, "%04x-%08x",
1846 be16_to_cpu(dcr
->vendor_id
),
1847 be32_to_cpu(dcr
->serial_number
));
1849 adev
= to_acpi_dev(acpi_desc
);
1851 /* unit test case */
1852 populate_shutdown_status(nfit_mem
);
1856 adev_dimm
= acpi_find_child_device(adev
, device_handle
, false);
1857 nfit_mem
->adev
= adev_dimm
;
1859 dev_err(dev
, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1861 return force_enable_dimms
? 0 : -ENODEV
;
1864 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm
->handle
,
1865 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
, adev_dimm
))) {
1866 dev_err(dev
, "%s: notification registration failed\n",
1867 dev_name(&adev_dimm
->dev
));
1871 * Record nfit_mem for the notification path to track back to
1872 * the nfit sysfs attributes for this dimm device object.
1874 dev_set_drvdata(&adev_dimm
->dev
, nfit_mem
);
1877 * There are 4 "legacy" NVDIMM command sets
1878 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
1879 * an EFI working group was established to constrain this
1880 * proliferation. The nfit driver probes for the supported command
1881 * set by GUID. Note, if you're a platform developer looking to add
1882 * a new command set to this probe, consider using an existing set,
1883 * or otherwise seek approval to publish the command set at
1884 * http://www.uefi.org/RFIC_LIST.
1886 * Note, that checking for function0 (bit0) tells us if any commands
1887 * are reachable through this GUID.
1889 for (i
= 0; i
<= NVDIMM_FAMILY_MAX
; i
++)
1890 if (acpi_check_dsm(adev_dimm
->handle
, to_nfit_uuid(i
), 1, 1))
1891 if (family
< 0 || i
== default_dsm_family
)
1894 /* limit the supported commands to those that are publicly documented */
1895 nfit_mem
->family
= family
;
1896 if (override_dsm_mask
&& !disable_vendor_specific
)
1897 dsm_mask
= override_dsm_mask
;
1898 else if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1899 dsm_mask
= NVDIMM_INTEL_CMDMASK
;
1900 if (disable_vendor_specific
)
1901 dsm_mask
&= ~(1 << ND_CMD_VENDOR
);
1902 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE1
) {
1903 dsm_mask
= 0x1c3c76;
1904 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE2
) {
1906 if (disable_vendor_specific
)
1907 dsm_mask
&= ~(1 << 8);
1908 } else if (nfit_mem
->family
== NVDIMM_FAMILY_MSFT
) {
1909 dsm_mask
= 0xffffffff;
1910 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HYPERV
) {
1913 dev_dbg(dev
, "unknown dimm command family\n");
1914 nfit_mem
->family
= -1;
1915 /* DSMs are optional, continue loading the driver... */
1920 * Function 0 is the command interrogation function, don't
1921 * export it to potential userspace use, and enable it to be
1922 * used as an error value in acpi_nfit_ctl().
1926 guid
= to_nfit_uuid(nfit_mem
->family
);
1927 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1928 if (acpi_check_dsm(adev_dimm
->handle
, guid
,
1929 nfit_dsm_revid(nfit_mem
->family
, i
),
1931 set_bit(i
, &nfit_mem
->dsm_mask
);
1934 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1935 * due to their better semantics handling locked capacity.
1937 label_mask
= 1 << ND_CMD_GET_CONFIG_SIZE
| 1 << ND_CMD_GET_CONFIG_DATA
1938 | 1 << ND_CMD_SET_CONFIG_DATA
;
1939 if (family
== NVDIMM_FAMILY_INTEL
1940 && (dsm_mask
& label_mask
) == label_mask
)
1941 /* skip _LS{I,R,W} enabling */;
1943 if (acpi_nvdimm_has_method(adev_dimm
, "_LSI")
1944 && acpi_nvdimm_has_method(adev_dimm
, "_LSR")) {
1945 dev_dbg(dev
, "%s: has _LSR\n", dev_name(&adev_dimm
->dev
));
1946 set_bit(NFIT_MEM_LSR
, &nfit_mem
->flags
);
1949 if (test_bit(NFIT_MEM_LSR
, &nfit_mem
->flags
)
1950 && acpi_nvdimm_has_method(adev_dimm
, "_LSW")) {
1951 dev_dbg(dev
, "%s: has _LSW\n", dev_name(&adev_dimm
->dev
));
1952 set_bit(NFIT_MEM_LSW
, &nfit_mem
->flags
);
1956 * Quirk read-only label configurations to preserve
1957 * access to label-less namespaces by default.
1959 if (!test_bit(NFIT_MEM_LSW
, &nfit_mem
->flags
)
1961 dev_dbg(dev
, "%s: No _LSW, disable labels\n",
1962 dev_name(&adev_dimm
->dev
));
1963 clear_bit(NFIT_MEM_LSR
, &nfit_mem
->flags
);
1965 dev_dbg(dev
, "%s: Force enable labels\n",
1966 dev_name(&adev_dimm
->dev
));
1969 populate_shutdown_status(nfit_mem
);
1974 static void shutdown_dimm_notify(void *data
)
1976 struct acpi_nfit_desc
*acpi_desc
= data
;
1977 struct nfit_mem
*nfit_mem
;
1979 mutex_lock(&acpi_desc
->init_mutex
);
1981 * Clear out the nfit_mem->flags_attr and shut down dimm event
1984 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1985 struct acpi_device
*adev_dimm
= nfit_mem
->adev
;
1987 if (nfit_mem
->flags_attr
) {
1988 sysfs_put(nfit_mem
->flags_attr
);
1989 nfit_mem
->flags_attr
= NULL
;
1992 acpi_remove_notify_handler(adev_dimm
->handle
,
1993 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
);
1994 dev_set_drvdata(&adev_dimm
->dev
, NULL
);
1997 mutex_unlock(&acpi_desc
->init_mutex
);
2000 static const struct nvdimm_security_ops
*acpi_nfit_get_security_ops(int family
)
2003 case NVDIMM_FAMILY_INTEL
:
2004 return intel_security_ops
;
2010 static int acpi_nfit_register_dimms(struct acpi_nfit_desc
*acpi_desc
)
2012 struct nfit_mem
*nfit_mem
;
2013 int dimm_count
= 0, rc
;
2014 struct nvdimm
*nvdimm
;
2016 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
2017 struct acpi_nfit_flush_address
*flush
;
2018 unsigned long flags
= 0, cmd_mask
;
2019 struct nfit_memdev
*nfit_memdev
;
2023 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
2024 nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
, device_handle
);
2030 if (nfit_mem
->bdw
&& nfit_mem
->memdev_pmem
) {
2031 set_bit(NDD_ALIASING
, &flags
);
2032 set_bit(NDD_LABELING
, &flags
);
2035 /* collate flags across all memdevs for this dimm */
2036 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
2037 struct acpi_nfit_memory_map
*dimm_memdev
;
2039 dimm_memdev
= __to_nfit_memdev(nfit_mem
);
2040 if (dimm_memdev
->device_handle
2041 != nfit_memdev
->memdev
->device_handle
)
2043 dimm_memdev
->flags
|= nfit_memdev
->memdev
->flags
;
2046 mem_flags
= __to_nfit_memdev(nfit_mem
)->flags
;
2047 if (mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
)
2048 set_bit(NDD_UNARMED
, &flags
);
2050 rc
= acpi_nfit_add_dimm(acpi_desc
, nfit_mem
, device_handle
);
2055 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
2056 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
2057 * userspace interface.
2059 cmd_mask
= 1UL << ND_CMD_CALL
;
2060 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
2062 * These commands have a 1:1 correspondence
2063 * between DSM payload and libnvdimm ioctl
2066 cmd_mask
|= nfit_mem
->dsm_mask
& NVDIMM_STANDARD_CMDMASK
;
2069 /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
2070 if (nfit_mem
->family
== NVDIMM_FAMILY_HYPERV
)
2071 set_bit(NDD_NOBLK
, &flags
);
2073 if (test_bit(NFIT_MEM_LSR
, &nfit_mem
->flags
)) {
2074 set_bit(ND_CMD_GET_CONFIG_SIZE
, &cmd_mask
);
2075 set_bit(ND_CMD_GET_CONFIG_DATA
, &cmd_mask
);
2077 if (test_bit(NFIT_MEM_LSW
, &nfit_mem
->flags
))
2078 set_bit(ND_CMD_SET_CONFIG_DATA
, &cmd_mask
);
2080 flush
= nfit_mem
->nfit_flush
? nfit_mem
->nfit_flush
->flush
2082 nvdimm
= __nvdimm_create(acpi_desc
->nvdimm_bus
, nfit_mem
,
2083 acpi_nfit_dimm_attribute_groups
,
2084 flags
, cmd_mask
, flush
? flush
->hint_count
: 0,
2085 nfit_mem
->flush_wpq
, &nfit_mem
->id
[0],
2086 acpi_nfit_get_security_ops(nfit_mem
->family
));
2090 nfit_mem
->nvdimm
= nvdimm
;
2093 if ((mem_flags
& ACPI_NFIT_MEM_FAILED_MASK
) == 0)
2096 dev_err(acpi_desc
->dev
, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
2097 nvdimm_name(nvdimm
),
2098 mem_flags
& ACPI_NFIT_MEM_SAVE_FAILED
? " save_fail" : "",
2099 mem_flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? " restore_fail":"",
2100 mem_flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? " flush_fail" : "",
2101 mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
? " not_armed" : "",
2102 mem_flags
& ACPI_NFIT_MEM_MAP_FAILED
? " map_fail" : "");
2106 rc
= nvdimm_bus_check_dimm_count(acpi_desc
->nvdimm_bus
, dimm_count
);
2111 * Now that dimms are successfully registered, and async registration
2112 * is flushed, attempt to enable event notification.
2114 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
2115 struct kernfs_node
*nfit_kernfs
;
2117 nvdimm
= nfit_mem
->nvdimm
;
2121 nfit_kernfs
= sysfs_get_dirent(nvdimm_kobj(nvdimm
)->sd
, "nfit");
2123 nfit_mem
->flags_attr
= sysfs_get_dirent(nfit_kernfs
,
2125 sysfs_put(nfit_kernfs
);
2126 if (!nfit_mem
->flags_attr
)
2127 dev_warn(acpi_desc
->dev
, "%s: notifications disabled\n",
2128 nvdimm_name(nvdimm
));
2131 return devm_add_action_or_reset(acpi_desc
->dev
, shutdown_dimm_notify
,
2136 * These constants are private because there are no kernel consumers of
2139 enum nfit_aux_cmds
{
2140 NFIT_CMD_TRANSLATE_SPA
= 5,
2141 NFIT_CMD_ARS_INJECT_SET
= 7,
2142 NFIT_CMD_ARS_INJECT_CLEAR
= 8,
2143 NFIT_CMD_ARS_INJECT_GET
= 9,
2146 static void acpi_nfit_init_dsms(struct acpi_nfit_desc
*acpi_desc
)
2148 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
2149 const guid_t
*guid
= to_nfit_uuid(NFIT_DEV_BUS
);
2150 struct acpi_device
*adev
;
2151 unsigned long dsm_mask
;
2154 nd_desc
->cmd_mask
= acpi_desc
->bus_cmd_force_en
;
2155 nd_desc
->bus_dsm_mask
= acpi_desc
->bus_nfit_cmd_force_en
;
2156 adev
= to_acpi_dev(acpi_desc
);
2160 for (i
= ND_CMD_ARS_CAP
; i
<= ND_CMD_CLEAR_ERROR
; i
++)
2161 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
2162 set_bit(i
, &nd_desc
->cmd_mask
);
2163 set_bit(ND_CMD_CALL
, &nd_desc
->cmd_mask
);
2166 (1 << ND_CMD_ARS_CAP
) |
2167 (1 << ND_CMD_ARS_START
) |
2168 (1 << ND_CMD_ARS_STATUS
) |
2169 (1 << ND_CMD_CLEAR_ERROR
) |
2170 (1 << NFIT_CMD_TRANSLATE_SPA
) |
2171 (1 << NFIT_CMD_ARS_INJECT_SET
) |
2172 (1 << NFIT_CMD_ARS_INJECT_CLEAR
) |
2173 (1 << NFIT_CMD_ARS_INJECT_GET
);
2174 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
2175 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
2176 set_bit(i
, &nd_desc
->bus_dsm_mask
);
static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&acpi_nfit_region_attribute_group,
	NULL,
};
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16 vendor_id;
		u16 manufacturing_date;
		u8 manufacturing_location;
		u8 reserved[31];
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
	return sizeof(struct nfit_set_info2)
		+ num_mappings * sizeof(struct nfit_set_info_map2);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}
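
/*
 * The three checksums computed above serve as interleave-set cookies:
 * cookie1 covers the v1.1 map (region offset + DIMM serial number) sorted
 * with cmp_map(), cookie2 adds vendor and manufacturing identifiers for
 * v1.2 namespaces, and altcookie preserves compatibility with v1.1
 * namespaces that were created with the older memcmp()-based sort order.
 */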
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
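
/*
 * Illustrative example of the translation above (numbers are made up):
 * with line_size = 4096, num_lines = 2, and table_size = 16384, an offset
 * of 10000 decomposes as line_no = 2, sub_line_offset = 1808,
 * table_skip_count = 1, line_index = 0, so the returned offset is
 * base_offset + line_offset[0] * 4096 + 16384 + 1808.
 */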
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region, NULL);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
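
/*
 * The 64-bit block-window command written above packs the DPA in
 * cache-line units into the low offset field, the transfer length in
 * cache lines into the length field, and the read/write direction into
 * a separate command field; the trailing readq() is only issued when the
 * DIMM requires the latch behavior indicated by NFIT_BLK_DCR_LATCH.
 */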
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region, NULL);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
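
/*
 * acpi_nfit_blk_single_io() above handles at most one block window's
 * worth of data: the aperture is programmed once via write_blk_ctl() and
 * the copy loop then walks the aperture line by line when the window is
 * interleaved, falling back to a single copy otherwise. Success is
 * judged solely by the status register read at the end.
 */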
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
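
/*
 * When the DIMM does not implement the flags DSM (-ENOTTY) the code above
 * assumes the worst case: reads must invalidate the cache
 * (NFIT_BLK_READ_FLUSH) and control writes must be latched with a
 * follow-up read (NFIT_BLK_DCR_LATCH).
 */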
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (req_type == ARS_REQ_SHORT)
		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}
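
/*
 * A "short" ARS request (ARS_REQ_SHORT) sets ND_ARS_RETURN_PREV_DATA so
 * the platform may answer from previously collected results, which keeps
 * initial region registration fast; a long request asks for a full media
 * scrub of the range.
 */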
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
			/* complete */;
		else
			return;
	} else
		return;

	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;
	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;

	/*
	 * Ignore potentially stale results that are only refreshed
	 * after a start-ARS event.
	 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}
static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
		ndr_desc->target_node = acpi_map_pxm_to_node(
				spa->proximity_domain);
	} else {
		ndr_desc->numa_node = NUMA_NO_NODE;
		ndr_desc->target_node = NUMA_NO_NODE;
	}

	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status) {
		memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
		return 0;
	}

	ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
{
	int rc;

	if (ars_status_alloc(acpi_desc))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);

	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc))
		dev_err(acpi_desc->dev, "Failed to process ARS records\n");

	return rc;
}
static int ars_register(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc;

	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);

	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
	if (!no_init_ars)
		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);

	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
	case -ENOSPC:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
		if (rc == -EBUSY)
			break;
		if (rc) {
			set_bit(ARS_FAILED, &nfit_spa->ars_state);
			break;
		}
		clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
		rc = acpi_nfit_query_poison(acpi_desc);
		if (rc)
			break;
		acpi_desc->scrub_spa = nfit_spa;
		ars_complete(acpi_desc, nfit_spa);
		/*
		 * If ars_complete() says we didn't complete the
		 * short scrub, we'll try again with a long
		 * request.
		 */
		acpi_desc->scrub_spa = NULL;
		break;
	case -EBUSY:
	case -ENOMEM:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
		 * own scrubs.
		 */
		break;
	default:
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
		break;
	}

	return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
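
/*
 * Registration flow for ARS-capable ranges: a short ARS is attempted (or
 * the range is marked failed) before the region is registered, so that
 * known-bad blocks are already present in the badrange list by the time
 * namespaces surface. Any long scrub requested here is completed later by
 * the scrub worker.
 */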
static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;
		ars_complete(acpi_desc, nfit_spa);
	}
}
static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
		int query_rc)
{
	unsigned int tmo = acpi_desc->scrub_tmo;
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	lockdep_assert_held(&acpi_desc->init_mutex);

	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;

	if (query_rc == -EBUSY) {
		dev_dbg(dev, "ARS: ARS busy\n");
		return min(30U * 60U, tmo * 2);
	}
	if (query_rc == -ENOSPC) {
		dev_dbg(dev, "ARS: ARS continue\n");
		ars_continue(acpi_desc);
		return 1;
	}
	if (query_rc && query_rc != -EAGAIN) {
		unsigned long long addr, end;

		addr = acpi_desc->ars_status->address;
		end = addr + acpi_desc->ars_status->length;
		dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
				query_rc);
	}

	ars_complete_all(acpi_desc);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		enum nfit_ars_state req_type;
		int rc;

		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		/* prefer short ARS requests first */
		if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
			req_type = ARS_REQ_SHORT;
		else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
			req_type = ARS_REQ_LONG;
		else
			continue;
		rc = ars_start(acpi_desc, nfit_spa, req_type);

		dev = nd_region_dev(nfit_spa->nd_region);
		dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
				nfit_spa->spa->range_index,
				req_type == ARS_REQ_SHORT ? "short" : "long",
				rc);
		/*
		 * Hmm, we raced someone else starting ARS? Try again in
		 * a bit.
		 */
		if (rc == -EBUSY)
			return 1;
		if (rc == 0) {
			dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
					"scrub start while range %d active\n",
					acpi_desc->scrub_spa->spa->range_index);
			clear_bit(req_type, &nfit_spa->ars_state);
			acpi_desc->scrub_spa = nfit_spa;
			/*
			 * Consider this spa last for future scrub
			 * requests
			 */
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return 1;
		}

		dev_err(dev, "ARS: range %d ARS failed (%d)\n",
				nfit_spa->spa->range_index, rc);
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
	}
	return 0;
}
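
/*
 * __acpi_nfit_scrub() returns the delay, in seconds, before the scrub
 * worker should run again: 0 means all requested scrubs are done, 1
 * re-arms the worker promptly to poll an in-flight ARS, and a busy
 * platform backs off by doubling the timeout up to a 30 minute cap.
 */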
static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	/* note this should only be set from within the workqueue */
	if (tmo)
		acpi_desc->scrub_tmo = tmo;
	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
}

static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}

static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo;
	int query_rc;

	acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
	mutex_lock(&acpi_desc->init_mutex);
	query_rc = acpi_nfit_query_poison(acpi_desc);
	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
	if (tmo)
		__sched_ars(acpi_desc, tmo);
	else
		notify_ars_done(acpi_desc);
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
}
static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int type = nfit_spa_type(nfit_spa->spa);
	struct nd_cmd_ars_cap ars_cap;
	int rc;

	set_bit(ARS_FAILED, &nfit_spa->ars_state);
	memset(&ars_cap, 0, sizeof(ars_cap));
	rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
	if (rc < 0)
		return;
	/* check that the supported scrub types match the spa type */
	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
				& ND_ARS_VOLATILE) == 0)
		return;
	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
				& ND_ARS_PERSISTENT) == 0)
		return;

	nfit_spa->max_ars = ars_cap.max_ars_out;
	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}

	sched_ars(acpi_desc);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	nfit_device_lock(dev);
	nfit_device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race. It
	 * just needs guarantees that any ARS it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->dwork.work))
		return -EBUSY;

	return 0;
}
/* prevent security commands from being issued via ioctl */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
	struct nd_cmd_pkg *call_pkg = buf;
	unsigned int func;

	if (nvdimm && cmd == ND_CMD_CALL &&
			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
		func = call_pkg->nd_command;
		if (func > NVDIMM_CMD_MAX ||
		    (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
			return -EOPNOTSUPP;
	}

	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
		enum nfit_ars_state req_type)
{
	struct device *dev = acpi_desc->dev;
	int scheduled = 0, busy = 0;
	struct nfit_spa *nfit_spa;

	mutex_lock(&acpi_desc->init_mutex);
	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int type = nfit_spa_type(nfit_spa->spa);

		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
			continue;
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
			busy++;
		else
			scheduled++;
	}
	if (scheduled) {
		sched_ars(acpi_desc);
		dev_dbg(dev, "ars_scan triggered\n");
	}
	mutex_unlock(&acpi_desc->init_mutex);

	if (scheduled)
		return 0;
	if (busy)
		return -EBUSY;
	return -ENOTTY;
}
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	acpi_desc->scrub_tmo = 1;
	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
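
/*
 * acpi_nfit_desc_init() only wires up the descriptor callbacks and list
 * heads; the nvdimm bus itself is not registered until the first call to
 * acpi_nfit_init(), which also parses the NFIT/_FIT tables under
 * init_mutex.
 */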
static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race with interrupted cleanup.
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	nfit_device_lock(bus_dev);
	nfit_device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* The NVDIMM root device allows OS to trigger enumeration of
		 * NVDIMMs through NFIT at boot time and re-enumeration at
		 * root level via the _FIT method during runtime.
		 * This is ok to return 0 here, we could have an nvdimm
		 * hotplugged later and evaluate _FIT method which returns
		 * data in the format of a series of NFIT Structures.
		 */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
				(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	nfit_device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	nfit_device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");