/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <asm/cacheflush.h>

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
105 static int xlat_bus_status(void *buf
, unsigned int cmd
, u32 status
)
107 struct nd_cmd_clear_error
*clear_err
;
108 struct nd_cmd_ars_status
*ars_status
;
113 if ((status
& 0xffff) == NFIT_ARS_CAP_NONE
)
120 /* No supported scan types for this range */
121 flags
= ND_ARS_PERSISTENT
| ND_ARS_VOLATILE
;
122 if ((status
>> 16 & flags
) == 0)
125 case ND_CMD_ARS_START
:
126 /* ARS is in progress */
127 if ((status
& 0xffff) == NFIT_ARS_START_BUSY
)
134 case ND_CMD_ARS_STATUS
:
139 /* Check extended status (Upper two bytes) */
140 if (status
== NFIT_ARS_STATUS_DONE
)
143 /* ARS is in progress */
144 if (status
== NFIT_ARS_STATUS_BUSY
)
147 /* No ARS performed for the current boot */
148 if (status
== NFIT_ARS_STATUS_NONE
)
152 * ARS interrupted, either we overflowed or some other
153 * agent wants the scan to stop. If we didn't overflow
154 * then just continue with the returned results.
156 if (status
== NFIT_ARS_STATUS_INTR
) {
157 if (ars_status
->out_length
>= 40 && (ars_status
->flags
158 & NFIT_ARS_F_OVERFLOW
))
167 case ND_CMD_CLEAR_ERROR
:
171 if (!clear_err
->cleared
)
173 if (clear_err
->length
> clear_err
->cleared
)
174 return clear_err
->cleared
;
180 /* all other non-zero status results in an error */
186 #define ACPI_LABELS_LOCKED 3
188 static int xlat_nvdimm_status(struct nvdimm
*nvdimm
, void *buf
, unsigned int cmd
,
191 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
194 case ND_CMD_GET_CONFIG_SIZE
:
196 * In the _LSI, _LSR, _LSW case the locked status is
197 * communicated via the read/write commands
199 if (nfit_mem
->has_lsi
)
202 if (status
>> 16 & ND_CONFIG_LOCKED
)
205 case ND_CMD_GET_CONFIG_DATA
:
206 if (nfit_mem
->has_lsr
&& status
== ACPI_LABELS_LOCKED
)
209 case ND_CMD_SET_CONFIG_DATA
:
210 if (nfit_mem
->has_lsw
&& status
== ACPI_LABELS_LOCKED
)
217 /* all other non-zero status results in an error */
223 static int xlat_status(struct nvdimm
*nvdimm
, void *buf
, unsigned int cmd
,
227 return xlat_bus_status(buf
, cmd
, status
);
228 return xlat_nvdimm_status(nvdimm
, buf
, cmd
, status
);
231 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
232 static union acpi_object
*pkg_to_buf(union acpi_object
*pkg
)
237 union acpi_object
*buf
= NULL
;
239 if (pkg
->type
!= ACPI_TYPE_PACKAGE
) {
240 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
245 for (i
= 0; i
< pkg
->package
.count
; i
++) {
246 union acpi_object
*obj
= &pkg
->package
.elements
[i
];
248 if (obj
->type
== ACPI_TYPE_INTEGER
)
250 else if (obj
->type
== ACPI_TYPE_BUFFER
)
251 size
+= obj
->buffer
.length
;
253 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
259 buf
= ACPI_ALLOCATE(sizeof(*buf
) + size
);
264 buf
->type
= ACPI_TYPE_BUFFER
;
265 buf
->buffer
.length
= size
;
266 buf
->buffer
.pointer
= dst
;
267 for (i
= 0; i
< pkg
->package
.count
; i
++) {
268 union acpi_object
*obj
= &pkg
->package
.elements
[i
];
270 if (obj
->type
== ACPI_TYPE_INTEGER
) {
271 memcpy(dst
, &obj
->integer
.value
, 4);
273 } else if (obj
->type
== ACPI_TYPE_BUFFER
) {
274 memcpy(dst
, obj
->buffer
.pointer
, obj
->buffer
.length
);
275 dst
+= obj
->buffer
.length
;
283 static union acpi_object
*int_to_buf(union acpi_object
*integer
)
285 union acpi_object
*buf
= ACPI_ALLOCATE(sizeof(*buf
) + 4);
291 if (integer
->type
!= ACPI_TYPE_INTEGER
) {
292 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
298 buf
->type
= ACPI_TYPE_BUFFER
;
299 buf
->buffer
.length
= 4;
300 buf
->buffer
.pointer
= dst
;
301 memcpy(dst
, &integer
->integer
.value
, 4);
307 static union acpi_object
*acpi_label_write(acpi_handle handle
, u32 offset
,
311 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
312 struct acpi_object_list input
= {
314 .pointer
= (union acpi_object
[]) {
316 .integer
.type
= ACPI_TYPE_INTEGER
,
317 .integer
.value
= offset
,
320 .integer
.type
= ACPI_TYPE_INTEGER
,
321 .integer
.value
= len
,
324 .buffer
.type
= ACPI_TYPE_BUFFER
,
325 .buffer
.pointer
= data
,
326 .buffer
.length
= len
,
331 rc
= acpi_evaluate_object(handle
, "_LSW", &input
, &buf
);
332 if (ACPI_FAILURE(rc
))
334 return int_to_buf(buf
.pointer
);
337 static union acpi_object
*acpi_label_read(acpi_handle handle
, u32 offset
,
341 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
342 struct acpi_object_list input
= {
344 .pointer
= (union acpi_object
[]) {
346 .integer
.type
= ACPI_TYPE_INTEGER
,
347 .integer
.value
= offset
,
350 .integer
.type
= ACPI_TYPE_INTEGER
,
351 .integer
.value
= len
,
356 rc
= acpi_evaluate_object(handle
, "_LSR", &input
, &buf
);
357 if (ACPI_FAILURE(rc
))
359 return pkg_to_buf(buf
.pointer
);
362 static union acpi_object
*acpi_label_info(acpi_handle handle
)
365 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
367 rc
= acpi_evaluate_object(handle
, "_LSI", NULL
, &buf
);
368 if (ACPI_FAILURE(rc
))
370 return pkg_to_buf(buf
.pointer
);

static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id < 1)
		return 1; /* default */

	return id;
}
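
/*
 * For illustration, using the table above: Intel-family firmware-update
 * and related functions report DSM revision id 2, e.g.
 *
 *   nfit_dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_FWINFO) == 2
 *
 * while in-range functions without a table entry fall back to revision 1.
 */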
399 int acpi_nfit_ctl(struct nvdimm_bus_descriptor
*nd_desc
, struct nvdimm
*nvdimm
,
400 unsigned int cmd
, void *buf
, unsigned int buf_len
, int *cmd_rc
)
402 struct acpi_nfit_desc
*acpi_desc
= to_acpi_nfit_desc(nd_desc
);
403 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
404 union acpi_object in_obj
, in_buf
, *out_obj
;
405 const struct nd_cmd_desc
*desc
= NULL
;
406 struct device
*dev
= acpi_desc
->dev
;
407 struct nd_cmd_pkg
*call_pkg
= NULL
;
408 const char *cmd_name
, *dimm_name
;
409 unsigned long cmd_mask
, dsm_mask
;
410 u32 offset
, fw_status
= 0;
417 if (cmd
== ND_CMD_CALL
) {
419 func
= call_pkg
->nd_command
;
421 for (i
= 0; i
< ARRAY_SIZE(call_pkg
->nd_reserved2
); i
++)
422 if (call_pkg
->nd_reserved2
[i
])
427 struct acpi_device
*adev
= nfit_mem
->adev
;
431 if (call_pkg
&& nfit_mem
->family
!= call_pkg
->nd_family
)
434 dimm_name
= nvdimm_name(nvdimm
);
435 cmd_name
= nvdimm_cmd_name(cmd
);
436 cmd_mask
= nvdimm_cmd_mask(nvdimm
);
437 dsm_mask
= nfit_mem
->dsm_mask
;
438 desc
= nd_cmd_dimm_desc(cmd
);
439 guid
= to_nfit_uuid(nfit_mem
->family
);
440 handle
= adev
->handle
;
442 struct acpi_device
*adev
= to_acpi_dev(acpi_desc
);
444 cmd_name
= nvdimm_bus_cmd_name(cmd
);
445 cmd_mask
= nd_desc
->cmd_mask
;
447 if (cmd
== ND_CMD_CALL
)
448 dsm_mask
= nd_desc
->bus_dsm_mask
;
449 desc
= nd_cmd_bus_desc(cmd
);
450 guid
= to_nfit_uuid(NFIT_DEV_BUS
);
451 handle
= adev
->handle
;
455 if (!desc
|| (cmd
&& (desc
->out_num
+ desc
->in_num
== 0)))
458 if (!test_bit(cmd
, &cmd_mask
) || !test_bit(func
, &dsm_mask
))
461 in_obj
.type
= ACPI_TYPE_PACKAGE
;
462 in_obj
.package
.count
= 1;
463 in_obj
.package
.elements
= &in_buf
;
464 in_buf
.type
= ACPI_TYPE_BUFFER
;
465 in_buf
.buffer
.pointer
= buf
;
466 in_buf
.buffer
.length
= 0;
468 /* libnvdimm has already validated the input envelope */
469 for (i
= 0; i
< desc
->in_num
; i
++)
470 in_buf
.buffer
.length
+= nd_cmd_in_size(nvdimm
, cmd
, desc
,
474 /* skip over package wrapper */
475 in_buf
.buffer
.pointer
= (void *) &call_pkg
->nd_payload
;
476 in_buf
.buffer
.length
= call_pkg
->nd_size_in
;
479 dev_dbg(dev
, "%s:%s cmd: %d: func: %d input length: %d\n",
480 __func__
, dimm_name
, cmd
, func
, in_buf
.buffer
.length
);
481 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET
, 4, 4,
482 in_buf
.buffer
.pointer
,
483 min_t(u32
, 256, in_buf
.buffer
.length
), true);
485 /* call the BIOS, prefer the named methods over _DSM if available */
486 if (nvdimm
&& cmd
== ND_CMD_GET_CONFIG_SIZE
&& nfit_mem
->has_lsi
)
487 out_obj
= acpi_label_info(handle
);
488 else if (nvdimm
&& cmd
== ND_CMD_GET_CONFIG_DATA
&& nfit_mem
->has_lsr
) {
489 struct nd_cmd_get_config_data_hdr
*p
= buf
;
491 out_obj
= acpi_label_read(handle
, p
->in_offset
, p
->in_length
);
492 } else if (nvdimm
&& cmd
== ND_CMD_SET_CONFIG_DATA
493 && nfit_mem
->has_lsw
) {
494 struct nd_cmd_set_config_hdr
*p
= buf
;
496 out_obj
= acpi_label_write(handle
, p
->in_offset
, p
->in_length
,
502 revid
= nfit_dsm_revid(nfit_mem
->family
, func
);
505 out_obj
= acpi_evaluate_dsm(handle
, guid
, revid
, func
, &in_obj
);
509 dev_dbg(dev
, "%s:%s _DSM failed cmd: %s\n", __func__
, dimm_name
,
515 call_pkg
->nd_fw_size
= out_obj
->buffer
.length
;
516 memcpy(call_pkg
->nd_payload
+ call_pkg
->nd_size_in
,
517 out_obj
->buffer
.pointer
,
518 min(call_pkg
->nd_fw_size
, call_pkg
->nd_size_out
));
522 * Need to support FW function w/o known size in advance.
523 * Caller can determine required size based upon nd_fw_size.
524 * If we return an error (like elsewhere) then caller wouldn't
525 * be able to rely upon data returned to make calculation.
530 if (out_obj
->package
.type
!= ACPI_TYPE_BUFFER
) {
531 dev_dbg(dev
, "%s:%s unexpected output object type cmd: %s type: %d\n",
532 __func__
, dimm_name
, cmd_name
, out_obj
->type
);
537 dev_dbg(dev
, "%s:%s cmd: %s output length: %d\n", __func__
, dimm_name
,
538 cmd_name
, out_obj
->buffer
.length
);
539 print_hex_dump_debug(cmd_name
, DUMP_PREFIX_OFFSET
, 4, 4,
540 out_obj
->buffer
.pointer
,
541 min_t(u32
, 128, out_obj
->buffer
.length
), true);
543 for (i
= 0, offset
= 0; i
< desc
->out_num
; i
++) {
544 u32 out_size
= nd_cmd_out_size(nvdimm
, cmd
, desc
, i
, buf
,
545 (u32
*) out_obj
->buffer
.pointer
,
546 out_obj
->buffer
.length
- offset
);
548 if (offset
+ out_size
> out_obj
->buffer
.length
) {
549 dev_dbg(dev
, "%s:%s output object underflow cmd: %s field: %d\n",
550 __func__
, dimm_name
, cmd_name
, i
);
554 if (in_buf
.buffer
.length
+ offset
+ out_size
> buf_len
) {
555 dev_dbg(dev
, "%s:%s output overrun cmd: %s field: %d\n",
556 __func__
, dimm_name
, cmd_name
, i
);
560 memcpy(buf
+ in_buf
.buffer
.length
+ offset
,
561 out_obj
->buffer
.pointer
+ offset
, out_size
);
566 * Set fw_status for all the commands with a known format to be
567 * later interpreted by xlat_status().
569 if (i
>= 1 && ((!nvdimm
&& cmd
>= ND_CMD_ARS_CAP
570 && cmd
<= ND_CMD_CLEAR_ERROR
)
571 || (nvdimm
&& cmd
>= ND_CMD_SMART
572 && cmd
<= ND_CMD_VENDOR
)))
573 fw_status
= *(u32
*) out_obj
->buffer
.pointer
;
575 if (offset
+ in_buf
.buffer
.length
< buf_len
) {
578 * status valid, return the number of bytes left
579 * unfilled in the output buffer
581 rc
= buf_len
- offset
- in_buf
.buffer
.length
;
583 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
,
586 dev_err(dev
, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
587 __func__
, dimm_name
, cmd_name
, buf_len
,
594 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
, fw_status
);
602 EXPORT_SYMBOL_GPL(acpi_nfit_ctl
);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}
624 int nfit_spa_type(struct acpi_nfit_system_address
*spa
)
628 for (i
= 0; i
< NFIT_UUID_MAX
; i
++)
629 if (guid_equal(to_nfit_uuid(i
), (guid_t
*)&spa
->range_guid
))
634 static bool add_spa(struct acpi_nfit_desc
*acpi_desc
,
635 struct nfit_table_prev
*prev
,
636 struct acpi_nfit_system_address
*spa
)
638 struct device
*dev
= acpi_desc
->dev
;
639 struct nfit_spa
*nfit_spa
;
641 if (spa
->header
.length
!= sizeof(*spa
))
644 list_for_each_entry(nfit_spa
, &prev
->spas
, list
) {
645 if (memcmp(nfit_spa
->spa
, spa
, sizeof(*spa
)) == 0) {
646 list_move_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
651 nfit_spa
= devm_kzalloc(dev
, sizeof(*nfit_spa
) + sizeof(*spa
),
655 INIT_LIST_HEAD(&nfit_spa
->list
);
656 memcpy(nfit_spa
->spa
, spa
, sizeof(*spa
));
657 list_add_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
658 dev_dbg(dev
, "%s: spa index: %d type: %s\n", __func__
,
660 spa_type_name(nfit_spa_type(spa
)));
664 static bool add_memdev(struct acpi_nfit_desc
*acpi_desc
,
665 struct nfit_table_prev
*prev
,
666 struct acpi_nfit_memory_map
*memdev
)
668 struct device
*dev
= acpi_desc
->dev
;
669 struct nfit_memdev
*nfit_memdev
;
671 if (memdev
->header
.length
!= sizeof(*memdev
))
674 list_for_each_entry(nfit_memdev
, &prev
->memdevs
, list
)
675 if (memcmp(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
)) == 0) {
676 list_move_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
680 nfit_memdev
= devm_kzalloc(dev
, sizeof(*nfit_memdev
) + sizeof(*memdev
),
684 INIT_LIST_HEAD(&nfit_memdev
->list
);
685 memcpy(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
));
686 list_add_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
687 dev_dbg(dev
, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
688 __func__
, memdev
->device_handle
, memdev
->range_index
,
689 memdev
->region_index
, memdev
->flags
);
694 * An implementation may provide a truncated control region if no block windows
697 static size_t sizeof_dcr(struct acpi_nfit_control_region
*dcr
)
699 if (dcr
->header
.length
< offsetof(struct acpi_nfit_control_region
,
704 return offsetof(struct acpi_nfit_control_region
, window_size
);
707 static bool add_dcr(struct acpi_nfit_desc
*acpi_desc
,
708 struct nfit_table_prev
*prev
,
709 struct acpi_nfit_control_region
*dcr
)
711 struct device
*dev
= acpi_desc
->dev
;
712 struct nfit_dcr
*nfit_dcr
;
714 if (!sizeof_dcr(dcr
))
717 list_for_each_entry(nfit_dcr
, &prev
->dcrs
, list
)
718 if (memcmp(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
)) == 0) {
719 list_move_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
723 nfit_dcr
= devm_kzalloc(dev
, sizeof(*nfit_dcr
) + sizeof(*dcr
),
727 INIT_LIST_HEAD(&nfit_dcr
->list
);
728 memcpy(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
));
729 list_add_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
730 dev_dbg(dev
, "%s: dcr index: %d windows: %d\n", __func__
,
731 dcr
->region_index
, dcr
->windows
);
735 static bool add_bdw(struct acpi_nfit_desc
*acpi_desc
,
736 struct nfit_table_prev
*prev
,
737 struct acpi_nfit_data_region
*bdw
)
739 struct device
*dev
= acpi_desc
->dev
;
740 struct nfit_bdw
*nfit_bdw
;
742 if (bdw
->header
.length
!= sizeof(*bdw
))
744 list_for_each_entry(nfit_bdw
, &prev
->bdws
, list
)
745 if (memcmp(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
)) == 0) {
746 list_move_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
750 nfit_bdw
= devm_kzalloc(dev
, sizeof(*nfit_bdw
) + sizeof(*bdw
),
754 INIT_LIST_HEAD(&nfit_bdw
->list
);
755 memcpy(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
));
756 list_add_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
757 dev_dbg(dev
, "%s: bdw dcr: %d windows: %d\n", __func__
,
758 bdw
->region_index
, bdw
->windows
);
762 static size_t sizeof_idt(struct acpi_nfit_interleave
*idt
)
764 if (idt
->header
.length
< sizeof(*idt
))
766 return sizeof(*idt
) + sizeof(u32
) * (idt
->line_count
- 1);
769 static bool add_idt(struct acpi_nfit_desc
*acpi_desc
,
770 struct nfit_table_prev
*prev
,
771 struct acpi_nfit_interleave
*idt
)
773 struct device
*dev
= acpi_desc
->dev
;
774 struct nfit_idt
*nfit_idt
;
776 if (!sizeof_idt(idt
))
779 list_for_each_entry(nfit_idt
, &prev
->idts
, list
) {
780 if (sizeof_idt(nfit_idt
->idt
) != sizeof_idt(idt
))
783 if (memcmp(nfit_idt
->idt
, idt
, sizeof_idt(idt
)) == 0) {
784 list_move_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
789 nfit_idt
= devm_kzalloc(dev
, sizeof(*nfit_idt
) + sizeof_idt(idt
),
793 INIT_LIST_HEAD(&nfit_idt
->list
);
794 memcpy(nfit_idt
->idt
, idt
, sizeof_idt(idt
));
795 list_add_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
796 dev_dbg(dev
, "%s: idt index: %d num_lines: %d\n", __func__
,
797 idt
->interleave_index
, idt
->line_count
);
801 static size_t sizeof_flush(struct acpi_nfit_flush_address
*flush
)
803 if (flush
->header
.length
< sizeof(*flush
))
805 return sizeof(*flush
) + sizeof(u64
) * (flush
->hint_count
- 1);
808 static bool add_flush(struct acpi_nfit_desc
*acpi_desc
,
809 struct nfit_table_prev
*prev
,
810 struct acpi_nfit_flush_address
*flush
)
812 struct device
*dev
= acpi_desc
->dev
;
813 struct nfit_flush
*nfit_flush
;
815 if (!sizeof_flush(flush
))
818 list_for_each_entry(nfit_flush
, &prev
->flushes
, list
) {
819 if (sizeof_flush(nfit_flush
->flush
) != sizeof_flush(flush
))
822 if (memcmp(nfit_flush
->flush
, flush
,
823 sizeof_flush(flush
)) == 0) {
824 list_move_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
829 nfit_flush
= devm_kzalloc(dev
, sizeof(*nfit_flush
)
830 + sizeof_flush(flush
), GFP_KERNEL
);
833 INIT_LIST_HEAD(&nfit_flush
->list
);
834 memcpy(nfit_flush
->flush
, flush
, sizeof_flush(flush
));
835 list_add_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
836 dev_dbg(dev
, "%s: nfit_flush handle: %d hint_count: %d\n", __func__
,
837 flush
->device_handle
, flush
->hint_count
);
841 static bool add_platform_cap(struct acpi_nfit_desc
*acpi_desc
,
842 struct acpi_nfit_capabilities
*pcap
)
844 struct device
*dev
= acpi_desc
->dev
;
847 mask
= (1 << (pcap
->highest_capability
+ 1)) - 1;
848 acpi_desc
->platform_cap
= pcap
->capabilities
& mask
;
849 dev_dbg(dev
, "%s: cap: %#x\n", __func__
, acpi_desc
->platform_cap
);
853 static void *add_table(struct acpi_nfit_desc
*acpi_desc
,
854 struct nfit_table_prev
*prev
, void *table
, const void *end
)
856 struct device
*dev
= acpi_desc
->dev
;
857 struct acpi_nfit_header
*hdr
;
858 void *err
= ERR_PTR(-ENOMEM
);
865 dev_warn(dev
, "found a zero length table '%d' parsing nfit\n",
871 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS
:
872 if (!add_spa(acpi_desc
, prev
, table
))
875 case ACPI_NFIT_TYPE_MEMORY_MAP
:
876 if (!add_memdev(acpi_desc
, prev
, table
))
879 case ACPI_NFIT_TYPE_CONTROL_REGION
:
880 if (!add_dcr(acpi_desc
, prev
, table
))
883 case ACPI_NFIT_TYPE_DATA_REGION
:
884 if (!add_bdw(acpi_desc
, prev
, table
))
887 case ACPI_NFIT_TYPE_INTERLEAVE
:
888 if (!add_idt(acpi_desc
, prev
, table
))
891 case ACPI_NFIT_TYPE_FLUSH_ADDRESS
:
892 if (!add_flush(acpi_desc
, prev
, table
))
895 case ACPI_NFIT_TYPE_SMBIOS
:
896 dev_dbg(dev
, "%s: smbios\n", __func__
);
898 case ACPI_NFIT_TYPE_CAPABILITIES
:
899 if (!add_platform_cap(acpi_desc
, table
))
903 dev_err(dev
, "unknown table '%d' parsing nfit\n", hdr
->type
);
907 return table
+ hdr
->length
;
910 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc
*acpi_desc
,
911 struct nfit_mem
*nfit_mem
)
913 u32 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
914 u16 dcr
= nfit_mem
->dcr
->region_index
;
915 struct nfit_spa
*nfit_spa
;
917 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
918 u16 range_index
= nfit_spa
->spa
->range_index
;
919 int type
= nfit_spa_type(nfit_spa
->spa
);
920 struct nfit_memdev
*nfit_memdev
;
922 if (type
!= NFIT_SPA_BDW
)
925 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
926 if (nfit_memdev
->memdev
->range_index
!= range_index
)
928 if (nfit_memdev
->memdev
->device_handle
!= device_handle
)
930 if (nfit_memdev
->memdev
->region_index
!= dcr
)
933 nfit_mem
->spa_bdw
= nfit_spa
->spa
;
938 dev_dbg(acpi_desc
->dev
, "SPA-BDW not found for SPA-DCR %d\n",
939 nfit_mem
->spa_dcr
->range_index
);
940 nfit_mem
->bdw
= NULL
;
943 static void nfit_mem_init_bdw(struct acpi_nfit_desc
*acpi_desc
,
944 struct nfit_mem
*nfit_mem
, struct acpi_nfit_system_address
*spa
)
946 u16 dcr
= __to_nfit_memdev(nfit_mem
)->region_index
;
947 struct nfit_memdev
*nfit_memdev
;
948 struct nfit_bdw
*nfit_bdw
;
949 struct nfit_idt
*nfit_idt
;
950 u16 idt_idx
, range_index
;
952 list_for_each_entry(nfit_bdw
, &acpi_desc
->bdws
, list
) {
953 if (nfit_bdw
->bdw
->region_index
!= dcr
)
955 nfit_mem
->bdw
= nfit_bdw
->bdw
;
962 nfit_mem_find_spa_bdw(acpi_desc
, nfit_mem
);
964 if (!nfit_mem
->spa_bdw
)
967 range_index
= nfit_mem
->spa_bdw
->range_index
;
968 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
969 if (nfit_memdev
->memdev
->range_index
!= range_index
||
970 nfit_memdev
->memdev
->region_index
!= dcr
)
972 nfit_mem
->memdev_bdw
= nfit_memdev
->memdev
;
973 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
974 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
975 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
977 nfit_mem
->idt_bdw
= nfit_idt
->idt
;
984 static int __nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
,
985 struct acpi_nfit_system_address
*spa
)
987 struct nfit_mem
*nfit_mem
, *found
;
988 struct nfit_memdev
*nfit_memdev
;
989 int type
= spa
? nfit_spa_type(spa
) : 0;
1001 * This loop runs in two modes, when a dimm is mapped the loop
1002 * adds memdev associations to an existing dimm, or creates a
1003 * dimm. In the unmapped dimm case this loop sweeps for memdev
1004 * instances with an invalid / zero range_index and adds those
1005 * dimms without spa associations.
1007 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1008 struct nfit_flush
*nfit_flush
;
1009 struct nfit_dcr
*nfit_dcr
;
1013 if (spa
&& nfit_memdev
->memdev
->range_index
!= spa
->range_index
)
1015 if (!spa
&& nfit_memdev
->memdev
->range_index
)
1018 dcr
= nfit_memdev
->memdev
->region_index
;
1019 device_handle
= nfit_memdev
->memdev
->device_handle
;
1020 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1021 if (__to_nfit_memdev(nfit_mem
)->device_handle
1030 nfit_mem
= devm_kzalloc(acpi_desc
->dev
,
1031 sizeof(*nfit_mem
), GFP_KERNEL
);
1034 INIT_LIST_HEAD(&nfit_mem
->list
);
1035 nfit_mem
->acpi_desc
= acpi_desc
;
1036 list_add(&nfit_mem
->list
, &acpi_desc
->dimms
);
1039 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1040 if (nfit_dcr
->dcr
->region_index
!= dcr
)
1043 * Record the control region for the dimm. For
1044 * the ACPI 6.1 case, where there are separate
1045 * control regions for the pmem vs blk
1046 * interfaces, be sure to record the extended
1050 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1051 else if (nfit_mem
->dcr
->windows
== 0
1052 && nfit_dcr
->dcr
->windows
)
1053 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1057 list_for_each_entry(nfit_flush
, &acpi_desc
->flushes
, list
) {
1058 struct acpi_nfit_flush_address
*flush
;
1061 if (nfit_flush
->flush
->device_handle
!= device_handle
)
1063 nfit_mem
->nfit_flush
= nfit_flush
;
1064 flush
= nfit_flush
->flush
;
1065 nfit_mem
->flush_wpq
= devm_kzalloc(acpi_desc
->dev
,
1067 * sizeof(struct resource
), GFP_KERNEL
);
1068 if (!nfit_mem
->flush_wpq
)
1070 for (i
= 0; i
< flush
->hint_count
; i
++) {
1071 struct resource
*res
= &nfit_mem
->flush_wpq
[i
];
1073 res
->start
= flush
->hint_address
[i
];
1074 res
->end
= res
->start
+ 8 - 1;
1079 if (dcr
&& !nfit_mem
->dcr
) {
1080 dev_err(acpi_desc
->dev
, "SPA %d missing DCR %d\n",
1081 spa
->range_index
, dcr
);
1085 if (type
== NFIT_SPA_DCR
) {
1086 struct nfit_idt
*nfit_idt
;
1089 /* multiple dimms may share a SPA when interleaved */
1090 nfit_mem
->spa_dcr
= spa
;
1091 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1092 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
1093 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
1094 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
1096 nfit_mem
->idt_dcr
= nfit_idt
->idt
;
1099 nfit_mem_init_bdw(acpi_desc
, nfit_mem
, spa
);
1100 } else if (type
== NFIT_SPA_PM
) {
1102 * A single dimm may belong to multiple SPA-PM
1103 * ranges, record at least one in addition to
1104 * any SPA-DCR range.
1106 nfit_mem
->memdev_pmem
= nfit_memdev
->memdev
;
1108 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1114 static int nfit_mem_cmp(void *priv
, struct list_head
*_a
, struct list_head
*_b
)
1116 struct nfit_mem
*a
= container_of(_a
, typeof(*a
), list
);
1117 struct nfit_mem
*b
= container_of(_b
, typeof(*b
), list
);
1118 u32 handleA
, handleB
;
1120 handleA
= __to_nfit_memdev(a
)->device_handle
;
1121 handleB
= __to_nfit_memdev(b
)->device_handle
;
1122 if (handleA
< handleB
)
1124 else if (handleA
> handleB
)
1129 static int nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
)
1131 struct nfit_spa
*nfit_spa
;
1136 * For each SPA-DCR or SPA-PMEM address range find its
1137 * corresponding MEMDEV(s). From each MEMDEV find the
1138 * corresponding DCR. Then, if we're operating on a SPA-DCR,
1139 * try to find a SPA-BDW and a corresponding BDW that references
1140 * the DCR. Throw it all into an nfit_mem object. Note, that
1141 * BDWs are optional.
1143 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
1144 rc
= __nfit_mem_init(acpi_desc
, nfit_spa
->spa
);
1150 * If a DIMM has failed to be mapped into SPA there will be no
1151 * SPA entries above. Find and register all the unmapped DIMMs
1152 * for reporting and recovery purposes.
1154 rc
= __nfit_mem_init(acpi_desc
, NULL
);
1158 list_sort(NULL
, &acpi_desc
->dimms
, nfit_mem_cmp
);
1163 static ssize_t
bus_dsm_mask_show(struct device
*dev
,
1164 struct device_attribute
*attr
, char *buf
)
1166 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1167 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1169 return sprintf(buf
, "%#lx\n", nd_desc
->bus_dsm_mask
);
1171 static struct device_attribute dev_attr_bus_dsm_mask
=
1172 __ATTR(dsm_mask
, 0444, bus_dsm_mask_show
, NULL
);
1174 static ssize_t
revision_show(struct device
*dev
,
1175 struct device_attribute
*attr
, char *buf
)
1177 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1178 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1179 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1181 return sprintf(buf
, "%d\n", acpi_desc
->acpi_header
.revision
);
1183 static DEVICE_ATTR_RO(revision
);
1185 static ssize_t
hw_error_scrub_show(struct device
*dev
,
1186 struct device_attribute
*attr
, char *buf
)
1188 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1189 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1190 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1192 return sprintf(buf
, "%d\n", acpi_desc
->scrub_mode
);
/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
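
/*
 * Illustrative usage (the sysfs path is an assumption, it depends on the
 * platform's nvdimm bus device name):
 *
 *   echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub
 *
 * enables a full scrub on a memory-error exception, and writing '0'
 * restores the default insert-into-badblocks-only behavior.
 */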
1202 static ssize_t
hw_error_scrub_store(struct device
*dev
,
1203 struct device_attribute
*attr
, const char *buf
, size_t size
)
1205 struct nvdimm_bus_descriptor
*nd_desc
;
1209 rc
= kstrtol(buf
, 0, &val
);
1214 nd_desc
= dev_get_drvdata(dev
);
1216 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1219 case HW_ERROR_SCRUB_ON
:
1220 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_ON
;
1222 case HW_ERROR_SCRUB_OFF
:
1223 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_OFF
;
1235 static DEVICE_ATTR_RW(hw_error_scrub
);
/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
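
/*
 * Illustrative usage (sysfs path is an assumption): reading the attribute,
 * e.g. "cat /sys/bus/nd/devices/ndbus0/nfit/scrub", may return "2+",
 * meaning two scrubs have completed and another is in flight; writing to
 * the same attribute requests a new Address Range Scrub.
 */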
1242 static ssize_t
scrub_show(struct device
*dev
,
1243 struct device_attribute
*attr
, char *buf
)
1245 struct nvdimm_bus_descriptor
*nd_desc
;
1246 ssize_t rc
= -ENXIO
;
1249 nd_desc
= dev_get_drvdata(dev
);
1251 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1253 rc
= sprintf(buf
, "%d%s", acpi_desc
->scrub_count
,
1254 (work_busy(&acpi_desc
->work
)) ? "+\n" : "\n");
1260 static ssize_t
scrub_store(struct device
*dev
,
1261 struct device_attribute
*attr
, const char *buf
, size_t size
)
1263 struct nvdimm_bus_descriptor
*nd_desc
;
1267 rc
= kstrtol(buf
, 0, &val
);
1274 nd_desc
= dev_get_drvdata(dev
);
1276 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1278 rc
= acpi_nfit_ars_rescan(acpi_desc
, 0);
1285 static DEVICE_ATTR_RW(scrub
);
1287 static bool ars_supported(struct nvdimm_bus
*nvdimm_bus
)
1289 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1290 const unsigned long mask
= 1 << ND_CMD_ARS_CAP
| 1 << ND_CMD_ARS_START
1291 | 1 << ND_CMD_ARS_STATUS
;
1293 return (nd_desc
->cmd_mask
& mask
) == mask
;
1296 static umode_t
nfit_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
1298 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1299 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1301 if (a
== &dev_attr_scrub
.attr
&& !ars_supported(nvdimm_bus
))
1306 static struct attribute
*acpi_nfit_attributes
[] = {
1307 &dev_attr_revision
.attr
,
1308 &dev_attr_scrub
.attr
,
1309 &dev_attr_hw_error_scrub
.attr
,
1310 &dev_attr_bus_dsm_mask
.attr
,
1314 static const struct attribute_group acpi_nfit_attribute_group
= {
1316 .attrs
= acpi_nfit_attributes
,
1317 .is_visible
= nfit_visible
,
1320 static const struct attribute_group
*acpi_nfit_attribute_groups
[] = {
1321 &nvdimm_bus_attribute_group
,
1322 &acpi_nfit_attribute_group
,
1326 static struct acpi_nfit_memory_map
*to_nfit_memdev(struct device
*dev
)
1328 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1329 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1331 return __to_nfit_memdev(nfit_mem
);
1334 static struct acpi_nfit_control_region
*to_nfit_dcr(struct device
*dev
)
1336 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1337 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1339 return nfit_mem
->dcr
;
1342 static ssize_t
handle_show(struct device
*dev
,
1343 struct device_attribute
*attr
, char *buf
)
1345 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1347 return sprintf(buf
, "%#x\n", memdev
->device_handle
);
1349 static DEVICE_ATTR_RO(handle
);
1351 static ssize_t
phys_id_show(struct device
*dev
,
1352 struct device_attribute
*attr
, char *buf
)
1354 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1356 return sprintf(buf
, "%#x\n", memdev
->physical_id
);
1358 static DEVICE_ATTR_RO(phys_id
);
1360 static ssize_t
vendor_show(struct device
*dev
,
1361 struct device_attribute
*attr
, char *buf
)
1363 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1365 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->vendor_id
));
1367 static DEVICE_ATTR_RO(vendor
);
1369 static ssize_t
rev_id_show(struct device
*dev
,
1370 struct device_attribute
*attr
, char *buf
)
1372 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1374 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->revision_id
));
1376 static DEVICE_ATTR_RO(rev_id
);
1378 static ssize_t
device_show(struct device
*dev
,
1379 struct device_attribute
*attr
, char *buf
)
1381 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1383 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->device_id
));
1385 static DEVICE_ATTR_RO(device
);
1387 static ssize_t
subsystem_vendor_show(struct device
*dev
,
1388 struct device_attribute
*attr
, char *buf
)
1390 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1392 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_vendor_id
));
1394 static DEVICE_ATTR_RO(subsystem_vendor
);
1396 static ssize_t
subsystem_rev_id_show(struct device
*dev
,
1397 struct device_attribute
*attr
, char *buf
)
1399 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1401 return sprintf(buf
, "0x%04x\n",
1402 be16_to_cpu(dcr
->subsystem_revision_id
));
1404 static DEVICE_ATTR_RO(subsystem_rev_id
);
1406 static ssize_t
subsystem_device_show(struct device
*dev
,
1407 struct device_attribute
*attr
, char *buf
)
1409 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1411 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_device_id
));
1413 static DEVICE_ATTR_RO(subsystem_device
);
1415 static int num_nvdimm_formats(struct nvdimm
*nvdimm
)
1417 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1420 if (nfit_mem
->memdev_pmem
)
1422 if (nfit_mem
->memdev_bdw
)
1427 static ssize_t
format_show(struct device
*dev
,
1428 struct device_attribute
*attr
, char *buf
)
1430 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1432 return sprintf(buf
, "0x%04x\n", le16_to_cpu(dcr
->code
));
1434 static DEVICE_ATTR_RO(format
);
1436 static ssize_t
format1_show(struct device
*dev
,
1437 struct device_attribute
*attr
, char *buf
)
1440 ssize_t rc
= -ENXIO
;
1441 struct nfit_mem
*nfit_mem
;
1442 struct nfit_memdev
*nfit_memdev
;
1443 struct acpi_nfit_desc
*acpi_desc
;
1444 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1445 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1447 nfit_mem
= nvdimm_provider_data(nvdimm
);
1448 acpi_desc
= nfit_mem
->acpi_desc
;
1449 handle
= to_nfit_memdev(dev
)->device_handle
;
1451 /* assumes DIMMs have at most 2 published interface codes */
1452 mutex_lock(&acpi_desc
->init_mutex
);
1453 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1454 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
1455 struct nfit_dcr
*nfit_dcr
;
1457 if (memdev
->device_handle
!= handle
)
1460 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1461 if (nfit_dcr
->dcr
->region_index
!= memdev
->region_index
)
1463 if (nfit_dcr
->dcr
->code
== dcr
->code
)
1465 rc
= sprintf(buf
, "0x%04x\n",
1466 le16_to_cpu(nfit_dcr
->dcr
->code
));
1472 mutex_unlock(&acpi_desc
->init_mutex
);
1475 static DEVICE_ATTR_RO(format1
);
1477 static ssize_t
formats_show(struct device
*dev
,
1478 struct device_attribute
*attr
, char *buf
)
1480 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1482 return sprintf(buf
, "%d\n", num_nvdimm_formats(nvdimm
));
1484 static DEVICE_ATTR_RO(formats
);
1486 static ssize_t
serial_show(struct device
*dev
,
1487 struct device_attribute
*attr
, char *buf
)
1489 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1491 return sprintf(buf
, "0x%08x\n", be32_to_cpu(dcr
->serial_number
));
1493 static DEVICE_ATTR_RO(serial
);
1495 static ssize_t
family_show(struct device
*dev
,
1496 struct device_attribute
*attr
, char *buf
)
1498 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1499 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1501 if (nfit_mem
->family
< 0)
1503 return sprintf(buf
, "%d\n", nfit_mem
->family
);
1505 static DEVICE_ATTR_RO(family
);
1507 static ssize_t
dsm_mask_show(struct device
*dev
,
1508 struct device_attribute
*attr
, char *buf
)
1510 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1511 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1513 if (nfit_mem
->family
< 0)
1515 return sprintf(buf
, "%#lx\n", nfit_mem
->dsm_mask
);
1517 static DEVICE_ATTR_RO(dsm_mask
);
1519 static ssize_t
flags_show(struct device
*dev
,
1520 struct device_attribute
*attr
, char *buf
)
1522 u16 flags
= to_nfit_memdev(dev
)->flags
;
1524 return sprintf(buf
, "%s%s%s%s%s%s%s\n",
1525 flags
& ACPI_NFIT_MEM_SAVE_FAILED
? "save_fail " : "",
1526 flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? "restore_fail " : "",
1527 flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? "flush_fail " : "",
1528 flags
& ACPI_NFIT_MEM_NOT_ARMED
? "not_armed " : "",
1529 flags
& ACPI_NFIT_MEM_HEALTH_OBSERVED
? "smart_event " : "",
1530 flags
& ACPI_NFIT_MEM_MAP_FAILED
? "map_fail " : "",
1531 flags
& ACPI_NFIT_MEM_HEALTH_ENABLED
? "smart_notify " : "");
1533 static DEVICE_ATTR_RO(flags
);
1535 static ssize_t
id_show(struct device
*dev
,
1536 struct device_attribute
*attr
, char *buf
)
1538 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1540 if (dcr
->valid_fields
& ACPI_NFIT_CONTROL_MFG_INFO_VALID
)
1541 return sprintf(buf
, "%04x-%02x-%04x-%08x\n",
1542 be16_to_cpu(dcr
->vendor_id
),
1543 dcr
->manufacturing_location
,
1544 be16_to_cpu(dcr
->manufacturing_date
),
1545 be32_to_cpu(dcr
->serial_number
));
1547 return sprintf(buf
, "%04x-%08x\n",
1548 be16_to_cpu(dcr
->vendor_id
),
1549 be32_to_cpu(dcr
->serial_number
));
1551 static DEVICE_ATTR_RO(id
);
1553 static struct attribute
*acpi_nfit_dimm_attributes
[] = {
1554 &dev_attr_handle
.attr
,
1555 &dev_attr_phys_id
.attr
,
1556 &dev_attr_vendor
.attr
,
1557 &dev_attr_device
.attr
,
1558 &dev_attr_rev_id
.attr
,
1559 &dev_attr_subsystem_vendor
.attr
,
1560 &dev_attr_subsystem_device
.attr
,
1561 &dev_attr_subsystem_rev_id
.attr
,
1562 &dev_attr_format
.attr
,
1563 &dev_attr_formats
.attr
,
1564 &dev_attr_format1
.attr
,
1565 &dev_attr_serial
.attr
,
1566 &dev_attr_flags
.attr
,
1568 &dev_attr_family
.attr
,
1569 &dev_attr_dsm_mask
.attr
,
1573 static umode_t
acpi_nfit_dimm_attr_visible(struct kobject
*kobj
,
1574 struct attribute
*a
, int n
)
1576 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1577 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1579 if (!to_nfit_dcr(dev
)) {
1580 /* Without a dcr only the memdev attributes can be surfaced */
1581 if (a
== &dev_attr_handle
.attr
|| a
== &dev_attr_phys_id
.attr
1582 || a
== &dev_attr_flags
.attr
1583 || a
== &dev_attr_family
.attr
1584 || a
== &dev_attr_dsm_mask
.attr
)
1589 if (a
== &dev_attr_format1
.attr
&& num_nvdimm_formats(nvdimm
) <= 1)
1594 static const struct attribute_group acpi_nfit_dimm_attribute_group
= {
1596 .attrs
= acpi_nfit_dimm_attributes
,
1597 .is_visible
= acpi_nfit_dimm_attr_visible
,
1600 static const struct attribute_group
*acpi_nfit_dimm_attribute_groups
[] = {
1601 &nvdimm_attribute_group
,
1602 &nd_device_attribute_group
,
1603 &acpi_nfit_dimm_attribute_group
,
1607 static struct nvdimm
*acpi_nfit_dimm_by_handle(struct acpi_nfit_desc
*acpi_desc
,
1610 struct nfit_mem
*nfit_mem
;
1612 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1613 if (__to_nfit_memdev(nfit_mem
)->device_handle
== device_handle
)
1614 return nfit_mem
->nvdimm
;
1619 void __acpi_nvdimm_notify(struct device
*dev
, u32 event
)
1621 struct nfit_mem
*nfit_mem
;
1622 struct acpi_nfit_desc
*acpi_desc
;
1624 dev_dbg(dev
->parent
, "%s: %s: event: %d\n", dev_name(dev
), __func__
,
1627 if (event
!= NFIT_NOTIFY_DIMM_HEALTH
) {
1628 dev_dbg(dev
->parent
, "%s: unknown event: %d\n", dev_name(dev
),
1633 acpi_desc
= dev_get_drvdata(dev
->parent
);
1638 * If we successfully retrieved acpi_desc, then we know nfit_mem data
1641 nfit_mem
= dev_get_drvdata(dev
);
1642 if (nfit_mem
&& nfit_mem
->flags_attr
)
1643 sysfs_notify_dirent(nfit_mem
->flags_attr
);
1645 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify
);
1647 static void acpi_nvdimm_notify(acpi_handle handle
, u32 event
, void *data
)
1649 struct acpi_device
*adev
= data
;
1650 struct device
*dev
= &adev
->dev
;
1652 device_lock(dev
->parent
);
1653 __acpi_nvdimm_notify(dev
, event
);
1654 device_unlock(dev
->parent
);
1657 static int acpi_nfit_add_dimm(struct acpi_nfit_desc
*acpi_desc
,
1658 struct nfit_mem
*nfit_mem
, u32 device_handle
)
1660 struct acpi_device
*adev
, *adev_dimm
;
1661 struct device
*dev
= acpi_desc
->dev
;
1662 union acpi_object
*obj
;
1663 unsigned long dsm_mask
;
1668 /* nfit test assumes 1:1 relationship between commands and dsms */
1669 nfit_mem
->dsm_mask
= acpi_desc
->dimm_cmd_force_en
;
1670 nfit_mem
->family
= NVDIMM_FAMILY_INTEL
;
1671 adev
= to_acpi_dev(acpi_desc
);
1675 adev_dimm
= acpi_find_child_device(adev
, device_handle
, false);
1676 nfit_mem
->adev
= adev_dimm
;
1678 dev_err(dev
, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1680 return force_enable_dimms
? 0 : -ENODEV
;
1683 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm
->handle
,
1684 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
, adev_dimm
))) {
1685 dev_err(dev
, "%s: notification registration failed\n",
1686 dev_name(&adev_dimm
->dev
));
1690 * Record nfit_mem for the notification path to track back to
1691 * the nfit sysfs attributes for this dimm device object.
1693 dev_set_drvdata(&adev_dimm
->dev
, nfit_mem
);
1696 * Until standardization materializes we need to consider 4
1697 * different command sets. Note, that checking for function0 (bit0)
1698 * tells us if any commands are reachable through this GUID.
1700 for (i
= 0; i
<= NVDIMM_FAMILY_MAX
; i
++)
1701 if (acpi_check_dsm(adev_dimm
->handle
, to_nfit_uuid(i
), 1, 1))
1702 if (family
< 0 || i
== default_dsm_family
)
1705 /* limit the supported commands to those that are publicly documented */
1706 nfit_mem
->family
= family
;
1707 if (override_dsm_mask
&& !disable_vendor_specific
)
1708 dsm_mask
= override_dsm_mask
;
1709 else if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1710 dsm_mask
= NVDIMM_INTEL_CMDMASK
;
1711 if (disable_vendor_specific
)
1712 dsm_mask
&= ~(1 << ND_CMD_VENDOR
);
1713 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE1
) {
1714 dsm_mask
= 0x1c3c76;
1715 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE2
) {
1717 if (disable_vendor_specific
)
1718 dsm_mask
&= ~(1 << 8);
1719 } else if (nfit_mem
->family
== NVDIMM_FAMILY_MSFT
) {
1720 dsm_mask
= 0xffffffff;
1722 dev_dbg(dev
, "unknown dimm command family\n");
1723 nfit_mem
->family
= -1;
1724 /* DSMs are optional, continue loading the driver... */
1728 guid
= to_nfit_uuid(nfit_mem
->family
);
1729 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1730 if (acpi_check_dsm(adev_dimm
->handle
, guid
,
1731 nfit_dsm_revid(nfit_mem
->family
, i
),
1733 set_bit(i
, &nfit_mem
->dsm_mask
);
1735 obj
= acpi_label_info(adev_dimm
->handle
);
1738 nfit_mem
->has_lsi
= 1;
1739 dev_dbg(dev
, "%s: has _LSI\n", dev_name(&adev_dimm
->dev
));
1742 obj
= acpi_label_read(adev_dimm
->handle
, 0, 0);
1745 nfit_mem
->has_lsr
= 1;
1746 dev_dbg(dev
, "%s: has _LSR\n", dev_name(&adev_dimm
->dev
));
1749 obj
= acpi_label_write(adev_dimm
->handle
, 0, 0, NULL
);
1752 nfit_mem
->has_lsw
= 1;
1753 dev_dbg(dev
, "%s: has _LSW\n", dev_name(&adev_dimm
->dev
));
1759 static void shutdown_dimm_notify(void *data
)
1761 struct acpi_nfit_desc
*acpi_desc
= data
;
1762 struct nfit_mem
*nfit_mem
;
1764 mutex_lock(&acpi_desc
->init_mutex
);
1766 * Clear out the nfit_mem->flags_attr and shut down dimm event
1769 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1770 struct acpi_device
*adev_dimm
= nfit_mem
->adev
;
1772 if (nfit_mem
->flags_attr
) {
1773 sysfs_put(nfit_mem
->flags_attr
);
1774 nfit_mem
->flags_attr
= NULL
;
1777 acpi_remove_notify_handler(adev_dimm
->handle
,
1778 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
);
1779 dev_set_drvdata(&adev_dimm
->dev
, NULL
);
1782 mutex_unlock(&acpi_desc
->init_mutex
);
1785 static int acpi_nfit_register_dimms(struct acpi_nfit_desc
*acpi_desc
)
1787 struct nfit_mem
*nfit_mem
;
1788 int dimm_count
= 0, rc
;
1789 struct nvdimm
*nvdimm
;
1791 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1792 struct acpi_nfit_flush_address
*flush
;
1793 unsigned long flags
= 0, cmd_mask
;
1794 struct nfit_memdev
*nfit_memdev
;
1798 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
1799 nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
, device_handle
);
1805 if (nfit_mem
->bdw
&& nfit_mem
->memdev_pmem
)
1806 set_bit(NDD_ALIASING
, &flags
);
1808 /* collate flags across all memdevs for this dimm */
1809 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1810 struct acpi_nfit_memory_map
*dimm_memdev
;
1812 dimm_memdev
= __to_nfit_memdev(nfit_mem
);
1813 if (dimm_memdev
->device_handle
1814 != nfit_memdev
->memdev
->device_handle
)
1816 dimm_memdev
->flags
|= nfit_memdev
->memdev
->flags
;
1819 mem_flags
= __to_nfit_memdev(nfit_mem
)->flags
;
1820 if (mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
)
1821 set_bit(NDD_UNARMED
, &flags
);
1823 rc
= acpi_nfit_add_dimm(acpi_desc
, nfit_mem
, device_handle
);
1828 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1829 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1830 * userspace interface.
1832 cmd_mask
= 1UL << ND_CMD_CALL
;
1833 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1835 * These commands have a 1:1 correspondence
1836 * between DSM payload and libnvdimm ioctl
1839 cmd_mask
|= nfit_mem
->dsm_mask
& NVDIMM_STANDARD_CMDMASK
;
1842 if (nfit_mem
->has_lsi
)
1843 set_bit(ND_CMD_GET_CONFIG_SIZE
, &cmd_mask
);
1844 if (nfit_mem
->has_lsr
)
1845 set_bit(ND_CMD_GET_CONFIG_DATA
, &cmd_mask
);
1846 if (nfit_mem
->has_lsw
)
1847 set_bit(ND_CMD_SET_CONFIG_DATA
, &cmd_mask
);
1849 flush
= nfit_mem
->nfit_flush
? nfit_mem
->nfit_flush
->flush
1851 nvdimm
= nvdimm_create(acpi_desc
->nvdimm_bus
, nfit_mem
,
1852 acpi_nfit_dimm_attribute_groups
,
1853 flags
, cmd_mask
, flush
? flush
->hint_count
: 0,
1854 nfit_mem
->flush_wpq
);
1858 nfit_mem
->nvdimm
= nvdimm
;
1861 if ((mem_flags
& ACPI_NFIT_MEM_FAILED_MASK
) == 0)
1864 dev_info(acpi_desc
->dev
, "%s flags:%s%s%s%s%s\n",
1865 nvdimm_name(nvdimm
),
1866 mem_flags
& ACPI_NFIT_MEM_SAVE_FAILED
? " save_fail" : "",
1867 mem_flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? " restore_fail":"",
1868 mem_flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? " flush_fail" : "",
1869 mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
? " not_armed" : "",
1870 mem_flags
& ACPI_NFIT_MEM_MAP_FAILED
? " map_fail" : "");
1874 rc
= nvdimm_bus_check_dimm_count(acpi_desc
->nvdimm_bus
, dimm_count
);
1879 * Now that dimms are successfully registered, and async registration
1880 * is flushed, attempt to enable event notification.
1882 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1883 struct kernfs_node
*nfit_kernfs
;
1885 nvdimm
= nfit_mem
->nvdimm
;
1889 nfit_kernfs
= sysfs_get_dirent(nvdimm_kobj(nvdimm
)->sd
, "nfit");
1891 nfit_mem
->flags_attr
= sysfs_get_dirent(nfit_kernfs
,
1893 sysfs_put(nfit_kernfs
);
1894 if (!nfit_mem
->flags_attr
)
1895 dev_warn(acpi_desc
->dev
, "%s: notifications disabled\n",
1896 nvdimm_name(nvdimm
));
1899 return devm_add_action_or_reset(acpi_desc
->dev
, shutdown_dimm_notify
,

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
 */
enum nfit_aux_cmds {
	NFIT_CMD_TRANSLATE_SPA = 5,
	NFIT_CMD_ARS_INJECT_SET = 7,
	NFIT_CMD_ARS_INJECT_CLEAR = 8,
	NFIT_CMD_ARS_INJECT_GET = 9,
};
1914 static void acpi_nfit_init_dsms(struct acpi_nfit_desc
*acpi_desc
)
1916 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1917 const guid_t
*guid
= to_nfit_uuid(NFIT_DEV_BUS
);
1918 struct acpi_device
*adev
;
1919 unsigned long dsm_mask
;
1922 nd_desc
->cmd_mask
= acpi_desc
->bus_cmd_force_en
;
1923 nd_desc
->bus_dsm_mask
= acpi_desc
->bus_nfit_cmd_force_en
;
1924 adev
= to_acpi_dev(acpi_desc
);
1928 for (i
= ND_CMD_ARS_CAP
; i
<= ND_CMD_CLEAR_ERROR
; i
++)
1929 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
1930 set_bit(i
, &nd_desc
->cmd_mask
);
1931 set_bit(ND_CMD_CALL
, &nd_desc
->cmd_mask
);
1934 (1 << ND_CMD_ARS_CAP
) |
1935 (1 << ND_CMD_ARS_START
) |
1936 (1 << ND_CMD_ARS_STATUS
) |
1937 (1 << ND_CMD_CLEAR_ERROR
) |
1938 (1 << NFIT_CMD_TRANSLATE_SPA
) |
1939 (1 << NFIT_CMD_ARS_INJECT_SET
) |
1940 (1 << NFIT_CMD_ARS_INJECT_CLEAR
) |
1941 (1 << NFIT_CMD_ARS_INJECT_GET
);
1942 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1943 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
1944 set_bit(i
, &nd_desc
->bus_dsm_mask
);
1947 static ssize_t
range_index_show(struct device
*dev
,
1948 struct device_attribute
*attr
, char *buf
)
1950 struct nd_region
*nd_region
= to_nd_region(dev
);
1951 struct nfit_spa
*nfit_spa
= nd_region_provider_data(nd_region
);
1953 return sprintf(buf
, "%d\n", nfit_spa
->spa
->range_index
);
1955 static DEVICE_ATTR_RO(range_index
);
1957 static ssize_t
ecc_unit_size_show(struct device
*dev
,
1958 struct device_attribute
*attr
, char *buf
)
1960 struct nd_region
*nd_region
= to_nd_region(dev
);
1961 struct nfit_spa
*nfit_spa
= nd_region_provider_data(nd_region
);
1963 return sprintf(buf
, "%d\n", nfit_spa
->clear_err_unit
);
1965 static DEVICE_ATTR_RO(ecc_unit_size
);
1967 static struct attribute
*acpi_nfit_region_attributes
[] = {
1968 &dev_attr_range_index
.attr
,
1969 &dev_attr_ecc_unit_size
.attr
,
1973 static const struct attribute_group acpi_nfit_region_attribute_group
= {
1975 .attrs
= acpi_nfit_region_attributes
,
1978 static const struct attribute_group
*acpi_nfit_region_attribute_groups
[] = {
1979 &nd_region_attribute_group
,
1980 &nd_mapping_attribute_group
,
1981 &nd_device_attribute_group
,
1982 &nd_numa_attribute_group
,
1983 &acpi_nfit_region_attribute_group
,
1987 /* enough info to uniquely specify an interleave set */
1988 struct nfit_set_info
{
1989 struct nfit_set_info_map
{
1996 struct nfit_set_info2
{
1997 struct nfit_set_info_map2
{
2001 u16 manufacturing_date
;
2002 u8 manufacturing_location
;
2007 static size_t sizeof_nfit_set_info(int num_mappings
)
2009 return sizeof(struct nfit_set_info
)
2010 + num_mappings
* sizeof(struct nfit_set_info_map
);
2013 static size_t sizeof_nfit_set_info2(int num_mappings
)
2015 return sizeof(struct nfit_set_info2
)
2016 + num_mappings
* sizeof(struct nfit_set_info_map2
);
2019 static int cmp_map_compat(const void *m0
, const void *m1
)
2021 const struct nfit_set_info_map
*map0
= m0
;
2022 const struct nfit_set_info_map
*map1
= m1
;
2024 return memcmp(&map0
->region_offset
, &map1
->region_offset
,
2028 static int cmp_map(const void *m0
, const void *m1
)
2030 const struct nfit_set_info_map
*map0
= m0
;
2031 const struct nfit_set_info_map
*map1
= m1
;
2033 if (map0
->region_offset
< map1
->region_offset
)
2035 else if (map0
->region_offset
> map1
->region_offset
)
2040 static int cmp_map2(const void *m0
, const void *m1
)
2042 const struct nfit_set_info_map2
*map0
= m0
;
2043 const struct nfit_set_info_map2
*map1
= m1
;
2045 if (map0
->region_offset
< map1
->region_offset
)
2047 else if (map0
->region_offset
> map1
->region_offset
)
2052 /* Retrieve the nth entry referencing this spa */
2053 static struct acpi_nfit_memory_map
*memdev_from_spa(
2054 struct acpi_nfit_desc
*acpi_desc
, u16 range_index
, int n
)
2056 struct nfit_memdev
*nfit_memdev
;
2058 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
)
2059 if (nfit_memdev
->memdev
->range_index
== range_index
)
2061 return nfit_memdev
->memdev
;
2065 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc
*acpi_desc
,
2066 struct nd_region_desc
*ndr_desc
,
2067 struct acpi_nfit_system_address
*spa
)
2069 struct device
*dev
= acpi_desc
->dev
;
2070 struct nd_interleave_set
*nd_set
;
2071 u16 nr
= ndr_desc
->num_mappings
;
2072 struct nfit_set_info2
*info2
;
2073 struct nfit_set_info
*info
;
2076 nd_set
= devm_kzalloc(dev
, sizeof(*nd_set
), GFP_KERNEL
);
2079 ndr_desc
->nd_set
= nd_set
;
2080 guid_copy(&nd_set
->type_guid
, (guid_t
*) spa
->range_guid
);
2082 info
= devm_kzalloc(dev
, sizeof_nfit_set_info(nr
), GFP_KERNEL
);
2086 info2
= devm_kzalloc(dev
, sizeof_nfit_set_info2(nr
), GFP_KERNEL
);
2090 for (i
= 0; i
< nr
; i
++) {
2091 struct nd_mapping_desc
*mapping
= &ndr_desc
->mapping
[i
];
2092 struct nfit_set_info_map
*map
= &info
->mapping
[i
];
2093 struct nfit_set_info_map2
*map2
= &info2
->mapping
[i
];
2094 struct nvdimm
*nvdimm
= mapping
->nvdimm
;
2095 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
2096 struct acpi_nfit_memory_map
*memdev
= memdev_from_spa(acpi_desc
,
2097 spa
->range_index
, i
);
2098 struct acpi_nfit_control_region
*dcr
= nfit_mem
->dcr
;
2100 if (!memdev
|| !nfit_mem
->dcr
) {
2101 dev_err(dev
, "%s: failed to find DCR\n", __func__
);
2105 map
->region_offset
= memdev
->region_offset
;
2106 map
->serial_number
= dcr
->serial_number
;
2108 map2
->region_offset
= memdev
->region_offset
;
2109 map2
->serial_number
= dcr
->serial_number
;
2110 map2
->vendor_id
= dcr
->vendor_id
;
2111 map2
->manufacturing_date
= dcr
->manufacturing_date
;
2112 map2
->manufacturing_location
= dcr
->manufacturing_location
;
2115 /* v1.1 namespaces */
2116 sort(&info
->mapping
[0], nr
, sizeof(struct nfit_set_info_map
),
2118 nd_set
->cookie1
= nd_fletcher64(info
, sizeof_nfit_set_info(nr
), 0);
2120 /* v1.2 namespaces */
2121 sort(&info2
->mapping
[0], nr
, sizeof(struct nfit_set_info_map2
),
2123 nd_set
->cookie2
= nd_fletcher64(info2
, sizeof_nfit_set_info2(nr
), 0);
2125 /* support v1.1 namespaces created with the wrong sort order */
2126 sort(&info
->mapping
[0], nr
, sizeof(struct nfit_set_info_map
),
2127 cmp_map_compat
, NULL
);
2128 nd_set
->altcookie
= nd_fletcher64(info
, sizeof_nfit_set_info(nr
), 0);
2130 /* record the result of the sort for the mapping position */
2131 for (i
= 0; i
< nr
; i
++) {
2132 struct nfit_set_info_map2
*map2
= &info2
->mapping
[i
];
2135 for (j
= 0; j
< nr
; j
++) {
2136 struct nd_mapping_desc
*mapping
= &ndr_desc
->mapping
[j
];
2137 struct nvdimm
*nvdimm
= mapping
->nvdimm
;
2138 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
2139 struct acpi_nfit_control_region
*dcr
= nfit_mem
->dcr
;
2141 if (map2
->serial_number
== dcr
->serial_number
&&
2142 map2
->vendor_id
== dcr
->vendor_id
&&
2143 map2
->manufacturing_date
== dcr
->manufacturing_date
&&
2144 map2
->manufacturing_location
2145 == dcr
->manufacturing_location
) {
2146 mapping
->position
= i
;
2152 ndr_desc
->nd_set
= nd_set
;
2153 devm_kfree(dev
, info
);
2154 devm_kfree(dev
, info2
);

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
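
/*
 * Worked example (illustrative numbers): with line_size = 256,
 * num_lines = 4 and table_size = 1024, an offset of 2600 decomposes as
 * line_no = 10, sub_line_offset = 40, table_skip_count = 2 and
 * line_index = 2, so the translated offset is
 * base_offset + line_offset[2] * 256 + 2 * 1024 + 40.
 */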

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
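
/*
 * acpi_nfit_blk_region_do_io() below serializes access to a block data
 * window: it acquires a lane for the region, splits the request into
 * chunks no larger than the aperture (mmio->size), and issues each chunk
 * through acpi_nfit_blk_single_io() before releasing the lane.
 */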
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
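
/*
 * Address Range Scrub (ARS) flow: ars_get_cap() sizes the status buffer
 * and reports the supported scrub types, ars_start() kicks off a scrub of
 * a SPA range, ars_get_status() retrieves results, and ars_continue()
 * restarts a scrub that overflowed the status buffer from the reported
 * restart address.
 */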
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	ars_start.flags = acpi_desc->ars_start_flags;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	ars_start.flags = acpi_desc->ars_start_flags;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_status *ars_status)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;
	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0, rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			mapping->size = nfit_mem->bdw->capacity;
			mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);

	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
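
/*
 * Directed scrub of a single SPA range: start an ARS, then poll
 * acpi_nfit_query_poison() while the platform reports -EBUSY (bounded by
 * scrub_timeout), re-issue ars_continue() on -ENOSPC overflow (bounded by
 * scrub_overflow_abort), and finally notify the region so newly
 * discovered poison is revalidated.
 */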
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.  If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}
	acpi_desc->init_complete = 1;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	acpi_desc->ars_start_flags = 0;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}
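
/*
 * Region registration entry point: BLK (DCR) ranges are registered
 * immediately since they do not depend on ARS results, while PMEM and
 * volatile ranges are handed to the scrub workqueue and registered from
 * acpi_nfit_scrub() once initial scrub state is known.
 */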
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	acpi_desc->ars_start_flags = 0;
	if (!acpi_desc->cancel)
		queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;
	int rc;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* bounce the init_mutex to make init_complete valid */
	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel || acpi_desc->init_complete) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	mutex_unlock(&acpi_desc->init_mutex);

	rc = wait_for_completion_interruptible(&flush.cmp);
	cancel_work_sync(&flush.work);
	return rc;
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	acpi_desc->ars_start_flags = flags;
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race with interrupt driven list manipulation.
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	acpi_desc->cancel = 1;
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
			0 : ND_ARS_RETURN_PREV_DATA;

	acpi_nfit_ars_rescan(acpi_desc, flags);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "%s: event: 0x%x\n", __func__, event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");