2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 #include <linux/list_sort.h>
14 #include <linux/libnvdimm.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/ndctl.h>
18 #include <linux/sysfs.h>
19 #include <linux/delay.h>
20 #include <linux/list.h>
21 #include <linux/acpi.h>
22 #include <linux/sort.h>
25 #include <asm/cacheflush.h>
26 #include <acpi/nfit.h>
30 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
33 #include <linux/io-64-nonatomic-hi-lo.h>
35 static bool force_enable_dimms
;
36 module_param(force_enable_dimms
, bool, S_IRUGO
|S_IWUSR
);
37 MODULE_PARM_DESC(force_enable_dimms
, "Ignore _STA (ACPI DIMM device) status");
39 static bool disable_vendor_specific
;
40 module_param(disable_vendor_specific
, bool, S_IRUGO
);
41 MODULE_PARM_DESC(disable_vendor_specific
,
42 "Limit commands to the publicly specified set");
44 static unsigned long override_dsm_mask
;
45 module_param(override_dsm_mask
, ulong
, S_IRUGO
);
46 MODULE_PARM_DESC(override_dsm_mask
, "Bitmask of allowed NVDIMM DSM functions");
48 static int default_dsm_family
= -1;
49 module_param(default_dsm_family
, int, S_IRUGO
);
50 MODULE_PARM_DESC(default_dsm_family
,
51 "Try this DSM type first when identifying NVDIMM family");
53 static bool no_init_ars
;
54 module_param(no_init_ars
, bool, 0644);
55 MODULE_PARM_DESC(no_init_ars
, "Skip ARS run at nfit init time");
57 LIST_HEAD(acpi_descs
);
58 DEFINE_MUTEX(acpi_desc_lock
);
60 static struct workqueue_struct
*nfit_wq
;
62 struct nfit_table_prev
{
63 struct list_head spas
;
64 struct list_head memdevs
;
65 struct list_head dcrs
;
66 struct list_head bdws
;
67 struct list_head idts
;
68 struct list_head flushes
;
71 static guid_t nfit_uuid
[NFIT_UUID_MAX
];
73 const guid_t
*to_nfit_uuid(enum nfit_uuids id
)
75 return &nfit_uuid
[id
];
77 EXPORT_SYMBOL(to_nfit_uuid
);
79 static struct acpi_nfit_desc
*to_acpi_nfit_desc(
80 struct nvdimm_bus_descriptor
*nd_desc
)
82 return container_of(nd_desc
, struct acpi_nfit_desc
, nd_desc
);
85 static struct acpi_device
*to_acpi_dev(struct acpi_nfit_desc
*acpi_desc
)
87 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
90 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
93 if (!nd_desc
->provider_name
94 || strcmp(nd_desc
->provider_name
, "ACPI.NFIT") != 0)
97 return to_acpi_device(acpi_desc
->dev
);
100 static int xlat_bus_status(void *buf
, unsigned int cmd
, u32 status
)
102 struct nd_cmd_clear_error
*clear_err
;
103 struct nd_cmd_ars_status
*ars_status
;
108 if ((status
& 0xffff) == NFIT_ARS_CAP_NONE
)
115 /* No supported scan types for this range */
116 flags
= ND_ARS_PERSISTENT
| ND_ARS_VOLATILE
;
117 if ((status
>> 16 & flags
) == 0)
120 case ND_CMD_ARS_START
:
121 /* ARS is in progress */
122 if ((status
& 0xffff) == NFIT_ARS_START_BUSY
)
129 case ND_CMD_ARS_STATUS
:
134 /* Check extended status (Upper two bytes) */
135 if (status
== NFIT_ARS_STATUS_DONE
)
138 /* ARS is in progress */
139 if (status
== NFIT_ARS_STATUS_BUSY
)
142 /* No ARS performed for the current boot */
143 if (status
== NFIT_ARS_STATUS_NONE
)
147 * ARS interrupted, either we overflowed or some other
148 * agent wants the scan to stop. If we didn't overflow
149 * then just continue with the returned results.
151 if (status
== NFIT_ARS_STATUS_INTR
) {
152 if (ars_status
->out_length
>= 40 && (ars_status
->flags
153 & NFIT_ARS_F_OVERFLOW
))
162 case ND_CMD_CLEAR_ERROR
:
166 if (!clear_err
->cleared
)
168 if (clear_err
->length
> clear_err
->cleared
)
169 return clear_err
->cleared
;
175 /* all other non-zero status results in an error */
181 #define ACPI_LABELS_LOCKED 3
183 static int xlat_nvdimm_status(struct nvdimm
*nvdimm
, void *buf
, unsigned int cmd
,
186 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
189 case ND_CMD_GET_CONFIG_SIZE
:
191 * In the _LSI, _LSR, _LSW case the locked status is
192 * communicated via the read/write commands
194 if (nfit_mem
->has_lsr
)
197 if (status
>> 16 & ND_CONFIG_LOCKED
)
200 case ND_CMD_GET_CONFIG_DATA
:
201 if (nfit_mem
->has_lsr
&& status
== ACPI_LABELS_LOCKED
)
204 case ND_CMD_SET_CONFIG_DATA
:
205 if (nfit_mem
->has_lsw
&& status
== ACPI_LABELS_LOCKED
)
212 /* all other non-zero status results in an error */
218 static int xlat_status(struct nvdimm
*nvdimm
, void *buf
, unsigned int cmd
,
222 return xlat_bus_status(buf
, cmd
, status
);
223 return xlat_nvdimm_status(nvdimm
, buf
, cmd
, status
);
226 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
227 static union acpi_object
*pkg_to_buf(union acpi_object
*pkg
)
232 union acpi_object
*buf
= NULL
;
234 if (pkg
->type
!= ACPI_TYPE_PACKAGE
) {
235 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
240 for (i
= 0; i
< pkg
->package
.count
; i
++) {
241 union acpi_object
*obj
= &pkg
->package
.elements
[i
];
243 if (obj
->type
== ACPI_TYPE_INTEGER
)
245 else if (obj
->type
== ACPI_TYPE_BUFFER
)
246 size
+= obj
->buffer
.length
;
248 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
254 buf
= ACPI_ALLOCATE(sizeof(*buf
) + size
);
259 buf
->type
= ACPI_TYPE_BUFFER
;
260 buf
->buffer
.length
= size
;
261 buf
->buffer
.pointer
= dst
;
262 for (i
= 0; i
< pkg
->package
.count
; i
++) {
263 union acpi_object
*obj
= &pkg
->package
.elements
[i
];
265 if (obj
->type
== ACPI_TYPE_INTEGER
) {
266 memcpy(dst
, &obj
->integer
.value
, 4);
268 } else if (obj
->type
== ACPI_TYPE_BUFFER
) {
269 memcpy(dst
, obj
->buffer
.pointer
, obj
->buffer
.length
);
270 dst
+= obj
->buffer
.length
;
278 static union acpi_object
*int_to_buf(union acpi_object
*integer
)
280 union acpi_object
*buf
= ACPI_ALLOCATE(sizeof(*buf
) + 4);
286 if (integer
->type
!= ACPI_TYPE_INTEGER
) {
287 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
293 buf
->type
= ACPI_TYPE_BUFFER
;
294 buf
->buffer
.length
= 4;
295 buf
->buffer
.pointer
= dst
;
296 memcpy(dst
, &integer
->integer
.value
, 4);
302 static union acpi_object
*acpi_label_write(acpi_handle handle
, u32 offset
,
306 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
307 struct acpi_object_list input
= {
309 .pointer
= (union acpi_object
[]) {
311 .integer
.type
= ACPI_TYPE_INTEGER
,
312 .integer
.value
= offset
,
315 .integer
.type
= ACPI_TYPE_INTEGER
,
316 .integer
.value
= len
,
319 .buffer
.type
= ACPI_TYPE_BUFFER
,
320 .buffer
.pointer
= data
,
321 .buffer
.length
= len
,
326 rc
= acpi_evaluate_object(handle
, "_LSW", &input
, &buf
);
327 if (ACPI_FAILURE(rc
))
329 return int_to_buf(buf
.pointer
);
332 static union acpi_object
*acpi_label_read(acpi_handle handle
, u32 offset
,
336 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
337 struct acpi_object_list input
= {
339 .pointer
= (union acpi_object
[]) {
341 .integer
.type
= ACPI_TYPE_INTEGER
,
342 .integer
.value
= offset
,
345 .integer
.type
= ACPI_TYPE_INTEGER
,
346 .integer
.value
= len
,
351 rc
= acpi_evaluate_object(handle
, "_LSR", &input
, &buf
);
352 if (ACPI_FAILURE(rc
))
354 return pkg_to_buf(buf
.pointer
);
357 static union acpi_object
*acpi_label_info(acpi_handle handle
)
360 struct acpi_buffer buf
= { ACPI_ALLOCATE_BUFFER
, NULL
};
362 rc
= acpi_evaluate_object(handle
, "_LSI", NULL
, &buf
);
363 if (ACPI_FAILURE(rc
))
365 return pkg_to_buf(buf
.pointer
);
368 static u8
nfit_dsm_revid(unsigned family
, unsigned func
)
370 static const u8 revid_table
[NVDIMM_FAMILY_MAX
+1][32] = {
371 [NVDIMM_FAMILY_INTEL
] = {
372 [NVDIMM_INTEL_GET_MODES
] = 2,
373 [NVDIMM_INTEL_GET_FWINFO
] = 2,
374 [NVDIMM_INTEL_START_FWUPDATE
] = 2,
375 [NVDIMM_INTEL_SEND_FWUPDATE
] = 2,
376 [NVDIMM_INTEL_FINISH_FWUPDATE
] = 2,
377 [NVDIMM_INTEL_QUERY_FWUPDATE
] = 2,
378 [NVDIMM_INTEL_SET_THRESHOLD
] = 2,
379 [NVDIMM_INTEL_INJECT_ERROR
] = 2,
384 if (family
> NVDIMM_FAMILY_MAX
)
388 id
= revid_table
[family
][func
];
390 return 1; /* default */
394 static int cmd_to_func(struct nfit_mem
*nfit_mem
, unsigned int cmd
,
395 struct nd_cmd_pkg
*call_pkg
)
400 if (nfit_mem
&& nfit_mem
->family
!= call_pkg
->nd_family
)
403 for (i
= 0; i
< ARRAY_SIZE(call_pkg
->nd_reserved2
); i
++)
404 if (call_pkg
->nd_reserved2
[i
])
406 return call_pkg
->nd_command
;
409 /* In the !call_pkg case, bus commands == bus functions */
413 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
414 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
)
418 * Force function number validation to fail since 0 is never
419 * published as a valid function in dsm_mask.
424 int acpi_nfit_ctl(struct nvdimm_bus_descriptor
*nd_desc
, struct nvdimm
*nvdimm
,
425 unsigned int cmd
, void *buf
, unsigned int buf_len
, int *cmd_rc
)
427 struct acpi_nfit_desc
*acpi_desc
= to_acpi_nfit_desc(nd_desc
);
428 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
429 union acpi_object in_obj
, in_buf
, *out_obj
;
430 const struct nd_cmd_desc
*desc
= NULL
;
431 struct device
*dev
= acpi_desc
->dev
;
432 struct nd_cmd_pkg
*call_pkg
= NULL
;
433 const char *cmd_name
, *dimm_name
;
434 unsigned long cmd_mask
, dsm_mask
;
435 u32 offset
, fw_status
= 0;
443 if (cmd
== ND_CMD_CALL
)
445 func
= cmd_to_func(nfit_mem
, cmd
, call_pkg
);
450 struct acpi_device
*adev
= nfit_mem
->adev
;
455 dimm_name
= nvdimm_name(nvdimm
);
456 cmd_name
= nvdimm_cmd_name(cmd
);
457 cmd_mask
= nvdimm_cmd_mask(nvdimm
);
458 dsm_mask
= nfit_mem
->dsm_mask
;
459 desc
= nd_cmd_dimm_desc(cmd
);
460 guid
= to_nfit_uuid(nfit_mem
->family
);
461 handle
= adev
->handle
;
463 struct acpi_device
*adev
= to_acpi_dev(acpi_desc
);
465 cmd_name
= nvdimm_bus_cmd_name(cmd
);
466 cmd_mask
= nd_desc
->cmd_mask
;
467 dsm_mask
= nd_desc
->bus_dsm_mask
;
468 desc
= nd_cmd_bus_desc(cmd
);
469 guid
= to_nfit_uuid(NFIT_DEV_BUS
);
470 handle
= adev
->handle
;
474 if (!desc
|| (cmd
&& (desc
->out_num
+ desc
->in_num
== 0)))
478 * Check for a valid command. For ND_CMD_CALL, we also have to
479 * make sure that the DSM function is supported.
481 if (cmd
== ND_CMD_CALL
&& !test_bit(func
, &dsm_mask
))
483 else if (!test_bit(cmd
, &cmd_mask
))
486 in_obj
.type
= ACPI_TYPE_PACKAGE
;
487 in_obj
.package
.count
= 1;
488 in_obj
.package
.elements
= &in_buf
;
489 in_buf
.type
= ACPI_TYPE_BUFFER
;
490 in_buf
.buffer
.pointer
= buf
;
491 in_buf
.buffer
.length
= 0;
493 /* libnvdimm has already validated the input envelope */
494 for (i
= 0; i
< desc
->in_num
; i
++)
495 in_buf
.buffer
.length
+= nd_cmd_in_size(nvdimm
, cmd
, desc
,
499 /* skip over package wrapper */
500 in_buf
.buffer
.pointer
= (void *) &call_pkg
->nd_payload
;
501 in_buf
.buffer
.length
= call_pkg
->nd_size_in
;
504 dev_dbg(dev
, "%s cmd: %d: func: %d input length: %d\n",
505 dimm_name
, cmd
, func
, in_buf
.buffer
.length
);
506 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET
, 4, 4,
507 in_buf
.buffer
.pointer
,
508 min_t(u32
, 256, in_buf
.buffer
.length
), true);
510 /* call the BIOS, prefer the named methods over _DSM if available */
511 if (nvdimm
&& cmd
== ND_CMD_GET_CONFIG_SIZE
&& nfit_mem
->has_lsr
)
512 out_obj
= acpi_label_info(handle
);
513 else if (nvdimm
&& cmd
== ND_CMD_GET_CONFIG_DATA
&& nfit_mem
->has_lsr
) {
514 struct nd_cmd_get_config_data_hdr
*p
= buf
;
516 out_obj
= acpi_label_read(handle
, p
->in_offset
, p
->in_length
);
517 } else if (nvdimm
&& cmd
== ND_CMD_SET_CONFIG_DATA
518 && nfit_mem
->has_lsw
) {
519 struct nd_cmd_set_config_hdr
*p
= buf
;
521 out_obj
= acpi_label_write(handle
, p
->in_offset
, p
->in_length
,
527 revid
= nfit_dsm_revid(nfit_mem
->family
, func
);
530 out_obj
= acpi_evaluate_dsm(handle
, guid
, revid
, func
, &in_obj
);
534 dev_dbg(dev
, "%s _DSM failed cmd: %s\n", dimm_name
, cmd_name
);
538 if (out_obj
->type
!= ACPI_TYPE_BUFFER
) {
539 dev_dbg(dev
, "%s unexpected output object type cmd: %s type: %d\n",
540 dimm_name
, cmd_name
, out_obj
->type
);
545 dev_dbg(dev
, "%s cmd: %s output length: %d\n", dimm_name
,
546 cmd_name
, out_obj
->buffer
.length
);
547 print_hex_dump_debug(cmd_name
, DUMP_PREFIX_OFFSET
, 4, 4,
548 out_obj
->buffer
.pointer
,
549 min_t(u32
, 128, out_obj
->buffer
.length
), true);
552 call_pkg
->nd_fw_size
= out_obj
->buffer
.length
;
553 memcpy(call_pkg
->nd_payload
+ call_pkg
->nd_size_in
,
554 out_obj
->buffer
.pointer
,
555 min(call_pkg
->nd_fw_size
, call_pkg
->nd_size_out
));
559 * Need to support FW function w/o known size in advance.
560 * Caller can determine required size based upon nd_fw_size.
561 * If we return an error (like elsewhere) then caller wouldn't
562 * be able to rely upon data returned to make calculation.
569 for (i
= 0, offset
= 0; i
< desc
->out_num
; i
++) {
570 u32 out_size
= nd_cmd_out_size(nvdimm
, cmd
, desc
, i
, buf
,
571 (u32
*) out_obj
->buffer
.pointer
,
572 out_obj
->buffer
.length
- offset
);
574 if (offset
+ out_size
> out_obj
->buffer
.length
) {
575 dev_dbg(dev
, "%s output object underflow cmd: %s field: %d\n",
576 dimm_name
, cmd_name
, i
);
580 if (in_buf
.buffer
.length
+ offset
+ out_size
> buf_len
) {
581 dev_dbg(dev
, "%s output overrun cmd: %s field: %d\n",
582 dimm_name
, cmd_name
, i
);
586 memcpy(buf
+ in_buf
.buffer
.length
+ offset
,
587 out_obj
->buffer
.pointer
+ offset
, out_size
);
592 * Set fw_status for all the commands with a known format to be
593 * later interpreted by xlat_status().
595 if (i
>= 1 && ((!nvdimm
&& cmd
>= ND_CMD_ARS_CAP
596 && cmd
<= ND_CMD_CLEAR_ERROR
)
597 || (nvdimm
&& cmd
>= ND_CMD_SMART
598 && cmd
<= ND_CMD_VENDOR
)))
599 fw_status
= *(u32
*) out_obj
->buffer
.pointer
;
601 if (offset
+ in_buf
.buffer
.length
< buf_len
) {
604 * status valid, return the number of bytes left
605 * unfilled in the output buffer
607 rc
= buf_len
- offset
- in_buf
.buffer
.length
;
609 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
,
612 dev_err(dev
, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
613 __func__
, dimm_name
, cmd_name
, buf_len
,
620 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
, fw_status
);
628 EXPORT_SYMBOL_GPL(acpi_nfit_ctl
);
630 static const char *spa_type_name(u16 type
)
632 static const char *to_name
[] = {
633 [NFIT_SPA_VOLATILE
] = "volatile",
634 [NFIT_SPA_PM
] = "pmem",
635 [NFIT_SPA_DCR
] = "dimm-control-region",
636 [NFIT_SPA_BDW
] = "block-data-window",
637 [NFIT_SPA_VDISK
] = "volatile-disk",
638 [NFIT_SPA_VCD
] = "volatile-cd",
639 [NFIT_SPA_PDISK
] = "persistent-disk",
640 [NFIT_SPA_PCD
] = "persistent-cd",
644 if (type
> NFIT_SPA_PCD
)
647 return to_name
[type
];
650 int nfit_spa_type(struct acpi_nfit_system_address
*spa
)
654 for (i
= 0; i
< NFIT_UUID_MAX
; i
++)
655 if (guid_equal(to_nfit_uuid(i
), (guid_t
*)&spa
->range_guid
))
660 static bool add_spa(struct acpi_nfit_desc
*acpi_desc
,
661 struct nfit_table_prev
*prev
,
662 struct acpi_nfit_system_address
*spa
)
664 struct device
*dev
= acpi_desc
->dev
;
665 struct nfit_spa
*nfit_spa
;
667 if (spa
->header
.length
!= sizeof(*spa
))
670 list_for_each_entry(nfit_spa
, &prev
->spas
, list
) {
671 if (memcmp(nfit_spa
->spa
, spa
, sizeof(*spa
)) == 0) {
672 list_move_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
677 nfit_spa
= devm_kzalloc(dev
, sizeof(*nfit_spa
) + sizeof(*spa
),
681 INIT_LIST_HEAD(&nfit_spa
->list
);
682 memcpy(nfit_spa
->spa
, spa
, sizeof(*spa
));
683 list_add_tail(&nfit_spa
->list
, &acpi_desc
->spas
);
684 dev_dbg(dev
, "spa index: %d type: %s\n",
686 spa_type_name(nfit_spa_type(spa
)));
690 static bool add_memdev(struct acpi_nfit_desc
*acpi_desc
,
691 struct nfit_table_prev
*prev
,
692 struct acpi_nfit_memory_map
*memdev
)
694 struct device
*dev
= acpi_desc
->dev
;
695 struct nfit_memdev
*nfit_memdev
;
697 if (memdev
->header
.length
!= sizeof(*memdev
))
700 list_for_each_entry(nfit_memdev
, &prev
->memdevs
, list
)
701 if (memcmp(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
)) == 0) {
702 list_move_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
706 nfit_memdev
= devm_kzalloc(dev
, sizeof(*nfit_memdev
) + sizeof(*memdev
),
710 INIT_LIST_HEAD(&nfit_memdev
->list
);
711 memcpy(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
));
712 list_add_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
713 dev_dbg(dev
, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
714 memdev
->device_handle
, memdev
->range_index
,
715 memdev
->region_index
, memdev
->flags
);
719 int nfit_get_smbios_id(u32 device_handle
, u16
*flags
)
721 struct acpi_nfit_memory_map
*memdev
;
722 struct acpi_nfit_desc
*acpi_desc
;
723 struct nfit_mem
*nfit_mem
;
726 mutex_lock(&acpi_desc_lock
);
727 list_for_each_entry(acpi_desc
, &acpi_descs
, list
) {
728 mutex_lock(&acpi_desc
->init_mutex
);
729 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
730 memdev
= __to_nfit_memdev(nfit_mem
);
731 if (memdev
->device_handle
== device_handle
) {
732 *flags
= memdev
->flags
;
733 physical_id
= memdev
->physical_id
;
734 mutex_unlock(&acpi_desc
->init_mutex
);
735 mutex_unlock(&acpi_desc_lock
);
739 mutex_unlock(&acpi_desc
->init_mutex
);
741 mutex_unlock(&acpi_desc_lock
);
745 EXPORT_SYMBOL_GPL(nfit_get_smbios_id
);
748 * An implementation may provide a truncated control region if no block windows
751 static size_t sizeof_dcr(struct acpi_nfit_control_region
*dcr
)
753 if (dcr
->header
.length
< offsetof(struct acpi_nfit_control_region
,
758 return offsetof(struct acpi_nfit_control_region
, window_size
);
761 static bool add_dcr(struct acpi_nfit_desc
*acpi_desc
,
762 struct nfit_table_prev
*prev
,
763 struct acpi_nfit_control_region
*dcr
)
765 struct device
*dev
= acpi_desc
->dev
;
766 struct nfit_dcr
*nfit_dcr
;
768 if (!sizeof_dcr(dcr
))
771 list_for_each_entry(nfit_dcr
, &prev
->dcrs
, list
)
772 if (memcmp(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
)) == 0) {
773 list_move_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
777 nfit_dcr
= devm_kzalloc(dev
, sizeof(*nfit_dcr
) + sizeof(*dcr
),
781 INIT_LIST_HEAD(&nfit_dcr
->list
);
782 memcpy(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
));
783 list_add_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
784 dev_dbg(dev
, "dcr index: %d windows: %d\n",
785 dcr
->region_index
, dcr
->windows
);
789 static bool add_bdw(struct acpi_nfit_desc
*acpi_desc
,
790 struct nfit_table_prev
*prev
,
791 struct acpi_nfit_data_region
*bdw
)
793 struct device
*dev
= acpi_desc
->dev
;
794 struct nfit_bdw
*nfit_bdw
;
796 if (bdw
->header
.length
!= sizeof(*bdw
))
798 list_for_each_entry(nfit_bdw
, &prev
->bdws
, list
)
799 if (memcmp(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
)) == 0) {
800 list_move_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
804 nfit_bdw
= devm_kzalloc(dev
, sizeof(*nfit_bdw
) + sizeof(*bdw
),
808 INIT_LIST_HEAD(&nfit_bdw
->list
);
809 memcpy(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
));
810 list_add_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
811 dev_dbg(dev
, "bdw dcr: %d windows: %d\n",
812 bdw
->region_index
, bdw
->windows
);
816 static size_t sizeof_idt(struct acpi_nfit_interleave
*idt
)
818 if (idt
->header
.length
< sizeof(*idt
))
820 return sizeof(*idt
) + sizeof(u32
) * (idt
->line_count
- 1);
823 static bool add_idt(struct acpi_nfit_desc
*acpi_desc
,
824 struct nfit_table_prev
*prev
,
825 struct acpi_nfit_interleave
*idt
)
827 struct device
*dev
= acpi_desc
->dev
;
828 struct nfit_idt
*nfit_idt
;
830 if (!sizeof_idt(idt
))
833 list_for_each_entry(nfit_idt
, &prev
->idts
, list
) {
834 if (sizeof_idt(nfit_idt
->idt
) != sizeof_idt(idt
))
837 if (memcmp(nfit_idt
->idt
, idt
, sizeof_idt(idt
)) == 0) {
838 list_move_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
843 nfit_idt
= devm_kzalloc(dev
, sizeof(*nfit_idt
) + sizeof_idt(idt
),
847 INIT_LIST_HEAD(&nfit_idt
->list
);
848 memcpy(nfit_idt
->idt
, idt
, sizeof_idt(idt
));
849 list_add_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
850 dev_dbg(dev
, "idt index: %d num_lines: %d\n",
851 idt
->interleave_index
, idt
->line_count
);
855 static size_t sizeof_flush(struct acpi_nfit_flush_address
*flush
)
857 if (flush
->header
.length
< sizeof(*flush
))
859 return sizeof(*flush
) + sizeof(u64
) * (flush
->hint_count
- 1);
862 static bool add_flush(struct acpi_nfit_desc
*acpi_desc
,
863 struct nfit_table_prev
*prev
,
864 struct acpi_nfit_flush_address
*flush
)
866 struct device
*dev
= acpi_desc
->dev
;
867 struct nfit_flush
*nfit_flush
;
869 if (!sizeof_flush(flush
))
872 list_for_each_entry(nfit_flush
, &prev
->flushes
, list
) {
873 if (sizeof_flush(nfit_flush
->flush
) != sizeof_flush(flush
))
876 if (memcmp(nfit_flush
->flush
, flush
,
877 sizeof_flush(flush
)) == 0) {
878 list_move_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
883 nfit_flush
= devm_kzalloc(dev
, sizeof(*nfit_flush
)
884 + sizeof_flush(flush
), GFP_KERNEL
);
887 INIT_LIST_HEAD(&nfit_flush
->list
);
888 memcpy(nfit_flush
->flush
, flush
, sizeof_flush(flush
));
889 list_add_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
890 dev_dbg(dev
, "nfit_flush handle: %d hint_count: %d\n",
891 flush
->device_handle
, flush
->hint_count
);
895 static bool add_platform_cap(struct acpi_nfit_desc
*acpi_desc
,
896 struct acpi_nfit_capabilities
*pcap
)
898 struct device
*dev
= acpi_desc
->dev
;
901 mask
= (1 << (pcap
->highest_capability
+ 1)) - 1;
902 acpi_desc
->platform_cap
= pcap
->capabilities
& mask
;
903 dev_dbg(dev
, "cap: %#x\n", acpi_desc
->platform_cap
);
907 static void *add_table(struct acpi_nfit_desc
*acpi_desc
,
908 struct nfit_table_prev
*prev
, void *table
, const void *end
)
910 struct device
*dev
= acpi_desc
->dev
;
911 struct acpi_nfit_header
*hdr
;
912 void *err
= ERR_PTR(-ENOMEM
);
919 dev_warn(dev
, "found a zero length table '%d' parsing nfit\n",
925 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS
:
926 if (!add_spa(acpi_desc
, prev
, table
))
929 case ACPI_NFIT_TYPE_MEMORY_MAP
:
930 if (!add_memdev(acpi_desc
, prev
, table
))
933 case ACPI_NFIT_TYPE_CONTROL_REGION
:
934 if (!add_dcr(acpi_desc
, prev
, table
))
937 case ACPI_NFIT_TYPE_DATA_REGION
:
938 if (!add_bdw(acpi_desc
, prev
, table
))
941 case ACPI_NFIT_TYPE_INTERLEAVE
:
942 if (!add_idt(acpi_desc
, prev
, table
))
945 case ACPI_NFIT_TYPE_FLUSH_ADDRESS
:
946 if (!add_flush(acpi_desc
, prev
, table
))
949 case ACPI_NFIT_TYPE_SMBIOS
:
950 dev_dbg(dev
, "smbios\n");
952 case ACPI_NFIT_TYPE_CAPABILITIES
:
953 if (!add_platform_cap(acpi_desc
, table
))
957 dev_err(dev
, "unknown table '%d' parsing nfit\n", hdr
->type
);
961 return table
+ hdr
->length
;
964 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc
*acpi_desc
,
965 struct nfit_mem
*nfit_mem
)
967 u32 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
968 u16 dcr
= nfit_mem
->dcr
->region_index
;
969 struct nfit_spa
*nfit_spa
;
971 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
972 u16 range_index
= nfit_spa
->spa
->range_index
;
973 int type
= nfit_spa_type(nfit_spa
->spa
);
974 struct nfit_memdev
*nfit_memdev
;
976 if (type
!= NFIT_SPA_BDW
)
979 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
980 if (nfit_memdev
->memdev
->range_index
!= range_index
)
982 if (nfit_memdev
->memdev
->device_handle
!= device_handle
)
984 if (nfit_memdev
->memdev
->region_index
!= dcr
)
987 nfit_mem
->spa_bdw
= nfit_spa
->spa
;
992 dev_dbg(acpi_desc
->dev
, "SPA-BDW not found for SPA-DCR %d\n",
993 nfit_mem
->spa_dcr
->range_index
);
994 nfit_mem
->bdw
= NULL
;
997 static void nfit_mem_init_bdw(struct acpi_nfit_desc
*acpi_desc
,
998 struct nfit_mem
*nfit_mem
, struct acpi_nfit_system_address
*spa
)
1000 u16 dcr
= __to_nfit_memdev(nfit_mem
)->region_index
;
1001 struct nfit_memdev
*nfit_memdev
;
1002 struct nfit_bdw
*nfit_bdw
;
1003 struct nfit_idt
*nfit_idt
;
1004 u16 idt_idx
, range_index
;
1006 list_for_each_entry(nfit_bdw
, &acpi_desc
->bdws
, list
) {
1007 if (nfit_bdw
->bdw
->region_index
!= dcr
)
1009 nfit_mem
->bdw
= nfit_bdw
->bdw
;
1016 nfit_mem_find_spa_bdw(acpi_desc
, nfit_mem
);
1018 if (!nfit_mem
->spa_bdw
)
1021 range_index
= nfit_mem
->spa_bdw
->range_index
;
1022 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1023 if (nfit_memdev
->memdev
->range_index
!= range_index
||
1024 nfit_memdev
->memdev
->region_index
!= dcr
)
1026 nfit_mem
->memdev_bdw
= nfit_memdev
->memdev
;
1027 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
1028 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
1029 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
1031 nfit_mem
->idt_bdw
= nfit_idt
->idt
;
1038 static int __nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
,
1039 struct acpi_nfit_system_address
*spa
)
1041 struct nfit_mem
*nfit_mem
, *found
;
1042 struct nfit_memdev
*nfit_memdev
;
1043 int type
= spa
? nfit_spa_type(spa
) : 0;
1055 * This loop runs in two modes, when a dimm is mapped the loop
1056 * adds memdev associations to an existing dimm, or creates a
1057 * dimm. In the unmapped dimm case this loop sweeps for memdev
1058 * instances with an invalid / zero range_index and adds those
1059 * dimms without spa associations.
1061 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1062 struct nfit_flush
*nfit_flush
;
1063 struct nfit_dcr
*nfit_dcr
;
1067 if (spa
&& nfit_memdev
->memdev
->range_index
!= spa
->range_index
)
1069 if (!spa
&& nfit_memdev
->memdev
->range_index
)
1072 dcr
= nfit_memdev
->memdev
->region_index
;
1073 device_handle
= nfit_memdev
->memdev
->device_handle
;
1074 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1075 if (__to_nfit_memdev(nfit_mem
)->device_handle
1084 nfit_mem
= devm_kzalloc(acpi_desc
->dev
,
1085 sizeof(*nfit_mem
), GFP_KERNEL
);
1088 INIT_LIST_HEAD(&nfit_mem
->list
);
1089 nfit_mem
->acpi_desc
= acpi_desc
;
1090 list_add(&nfit_mem
->list
, &acpi_desc
->dimms
);
1093 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1094 if (nfit_dcr
->dcr
->region_index
!= dcr
)
1097 * Record the control region for the dimm. For
1098 * the ACPI 6.1 case, where there are separate
1099 * control regions for the pmem vs blk
1100 * interfaces, be sure to record the extended
1104 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1105 else if (nfit_mem
->dcr
->windows
== 0
1106 && nfit_dcr
->dcr
->windows
)
1107 nfit_mem
->dcr
= nfit_dcr
->dcr
;
1111 list_for_each_entry(nfit_flush
, &acpi_desc
->flushes
, list
) {
1112 struct acpi_nfit_flush_address
*flush
;
1115 if (nfit_flush
->flush
->device_handle
!= device_handle
)
1117 nfit_mem
->nfit_flush
= nfit_flush
;
1118 flush
= nfit_flush
->flush
;
1119 nfit_mem
->flush_wpq
= devm_kcalloc(acpi_desc
->dev
,
1121 sizeof(struct resource
),
1123 if (!nfit_mem
->flush_wpq
)
1125 for (i
= 0; i
< flush
->hint_count
; i
++) {
1126 struct resource
*res
= &nfit_mem
->flush_wpq
[i
];
1128 res
->start
= flush
->hint_address
[i
];
1129 res
->end
= res
->start
+ 8 - 1;
1134 if (dcr
&& !nfit_mem
->dcr
) {
1135 dev_err(acpi_desc
->dev
, "SPA %d missing DCR %d\n",
1136 spa
->range_index
, dcr
);
1140 if (type
== NFIT_SPA_DCR
) {
1141 struct nfit_idt
*nfit_idt
;
1144 /* multiple dimms may share a SPA when interleaved */
1145 nfit_mem
->spa_dcr
= spa
;
1146 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1147 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
1148 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
1149 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
1151 nfit_mem
->idt_dcr
= nfit_idt
->idt
;
1154 nfit_mem_init_bdw(acpi_desc
, nfit_mem
, spa
);
1155 } else if (type
== NFIT_SPA_PM
) {
1157 * A single dimm may belong to multiple SPA-PM
1158 * ranges, record at least one in addition to
1159 * any SPA-DCR range.
1161 nfit_mem
->memdev_pmem
= nfit_memdev
->memdev
;
1163 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
1169 static int nfit_mem_cmp(void *priv
, struct list_head
*_a
, struct list_head
*_b
)
1171 struct nfit_mem
*a
= container_of(_a
, typeof(*a
), list
);
1172 struct nfit_mem
*b
= container_of(_b
, typeof(*b
), list
);
1173 u32 handleA
, handleB
;
1175 handleA
= __to_nfit_memdev(a
)->device_handle
;
1176 handleB
= __to_nfit_memdev(b
)->device_handle
;
1177 if (handleA
< handleB
)
1179 else if (handleA
> handleB
)
1184 static int nfit_mem_init(struct acpi_nfit_desc
*acpi_desc
)
1186 struct nfit_spa
*nfit_spa
;
1191 * For each SPA-DCR or SPA-PMEM address range find its
1192 * corresponding MEMDEV(s). From each MEMDEV find the
1193 * corresponding DCR. Then, if we're operating on a SPA-DCR,
1194 * try to find a SPA-BDW and a corresponding BDW that references
1195 * the DCR. Throw it all into an nfit_mem object. Note, that
1196 * BDWs are optional.
1198 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
1199 rc
= __nfit_mem_init(acpi_desc
, nfit_spa
->spa
);
1205 * If a DIMM has failed to be mapped into SPA there will be no
1206 * SPA entries above. Find and register all the unmapped DIMMs
1207 * for reporting and recovery purposes.
1209 rc
= __nfit_mem_init(acpi_desc
, NULL
);
1213 list_sort(NULL
, &acpi_desc
->dimms
, nfit_mem_cmp
);
1218 static ssize_t
bus_dsm_mask_show(struct device
*dev
,
1219 struct device_attribute
*attr
, char *buf
)
1221 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1222 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1224 return sprintf(buf
, "%#lx\n", nd_desc
->bus_dsm_mask
);
1226 static struct device_attribute dev_attr_bus_dsm_mask
=
1227 __ATTR(dsm_mask
, 0444, bus_dsm_mask_show
, NULL
);
1229 static ssize_t
revision_show(struct device
*dev
,
1230 struct device_attribute
*attr
, char *buf
)
1232 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1233 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1234 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1236 return sprintf(buf
, "%d\n", acpi_desc
->acpi_header
.revision
);
1238 static DEVICE_ATTR_RO(revision
);
1240 static ssize_t
hw_error_scrub_show(struct device
*dev
,
1241 struct device_attribute
*attr
, char *buf
)
1243 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1244 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1245 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1247 return sprintf(buf
, "%d\n", acpi_desc
->scrub_mode
);
1251 * The 'hw_error_scrub' attribute can have the following values written to it:
1252 * '0': Switch to the default mode where an exception will only insert
1253 * the address of the memory error into the poison and badblocks lists.
1254 * '1': Enable a full scrub to happen if an exception for a memory error is
1257 static ssize_t
hw_error_scrub_store(struct device
*dev
,
1258 struct device_attribute
*attr
, const char *buf
, size_t size
)
1260 struct nvdimm_bus_descriptor
*nd_desc
;
1264 rc
= kstrtol(buf
, 0, &val
);
1269 nd_desc
= dev_get_drvdata(dev
);
1271 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1274 case HW_ERROR_SCRUB_ON
:
1275 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_ON
;
1277 case HW_ERROR_SCRUB_OFF
:
1278 acpi_desc
->scrub_mode
= HW_ERROR_SCRUB_OFF
;
1290 static DEVICE_ATTR_RW(hw_error_scrub
);
1293 * This shows the number of full Address Range Scrubs that have been
1294 * completed since driver load time. Userspace can wait on this using
1295 * select/poll etc. A '+' at the end indicates an ARS is in progress
1297 static ssize_t
scrub_show(struct device
*dev
,
1298 struct device_attribute
*attr
, char *buf
)
1300 struct nvdimm_bus_descriptor
*nd_desc
;
1301 struct acpi_nfit_desc
*acpi_desc
;
1302 ssize_t rc
= -ENXIO
;
1306 nd_desc
= dev_get_drvdata(dev
);
1311 acpi_desc
= to_acpi_desc(nd_desc
);
1313 mutex_lock(&acpi_desc
->init_mutex
);
1314 busy
= test_bit(ARS_BUSY
, &acpi_desc
->scrub_flags
)
1315 && !test_bit(ARS_CANCEL
, &acpi_desc
->scrub_flags
);
1316 rc
= sprintf(buf
, "%d%s", acpi_desc
->scrub_count
, busy
? "+\n" : "\n");
1317 /* Allow an admin to poll the busy state at a higher rate */
1318 if (busy
&& capable(CAP_SYS_RAWIO
) && !test_and_set_bit(ARS_POLL
,
1319 &acpi_desc
->scrub_flags
)) {
1320 acpi_desc
->scrub_tmo
= 1;
1321 mod_delayed_work(nfit_wq
, &acpi_desc
->dwork
, HZ
);
1324 mutex_unlock(&acpi_desc
->init_mutex
);
1329 static ssize_t
scrub_store(struct device
*dev
,
1330 struct device_attribute
*attr
, const char *buf
, size_t size
)
1332 struct nvdimm_bus_descriptor
*nd_desc
;
1336 rc
= kstrtol(buf
, 0, &val
);
1343 nd_desc
= dev_get_drvdata(dev
);
1345 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1347 rc
= acpi_nfit_ars_rescan(acpi_desc
, ARS_REQ_LONG
);
1354 static DEVICE_ATTR_RW(scrub
);
1356 static bool ars_supported(struct nvdimm_bus
*nvdimm_bus
)
1358 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1359 const unsigned long mask
= 1 << ND_CMD_ARS_CAP
| 1 << ND_CMD_ARS_START
1360 | 1 << ND_CMD_ARS_STATUS
;
1362 return (nd_desc
->cmd_mask
& mask
) == mask
;
1365 static umode_t
nfit_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
1367 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1368 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
1370 if (a
== &dev_attr_scrub
.attr
&& !ars_supported(nvdimm_bus
))
1375 static struct attribute
*acpi_nfit_attributes
[] = {
1376 &dev_attr_revision
.attr
,
1377 &dev_attr_scrub
.attr
,
1378 &dev_attr_hw_error_scrub
.attr
,
1379 &dev_attr_bus_dsm_mask
.attr
,
1383 static const struct attribute_group acpi_nfit_attribute_group
= {
1385 .attrs
= acpi_nfit_attributes
,
1386 .is_visible
= nfit_visible
,
1389 static const struct attribute_group
*acpi_nfit_attribute_groups
[] = {
1390 &nvdimm_bus_attribute_group
,
1391 &acpi_nfit_attribute_group
,
1395 static struct acpi_nfit_memory_map
*to_nfit_memdev(struct device
*dev
)
1397 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1398 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1400 return __to_nfit_memdev(nfit_mem
);
1403 static struct acpi_nfit_control_region
*to_nfit_dcr(struct device
*dev
)
1405 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1406 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1408 return nfit_mem
->dcr
;
1411 static ssize_t
handle_show(struct device
*dev
,
1412 struct device_attribute
*attr
, char *buf
)
1414 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1416 return sprintf(buf
, "%#x\n", memdev
->device_handle
);
1418 static DEVICE_ATTR_RO(handle
);
1420 static ssize_t
phys_id_show(struct device
*dev
,
1421 struct device_attribute
*attr
, char *buf
)
1423 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1425 return sprintf(buf
, "%#x\n", memdev
->physical_id
);
1427 static DEVICE_ATTR_RO(phys_id
);
1429 static ssize_t
vendor_show(struct device
*dev
,
1430 struct device_attribute
*attr
, char *buf
)
1432 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1434 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->vendor_id
));
1436 static DEVICE_ATTR_RO(vendor
);
1438 static ssize_t
rev_id_show(struct device
*dev
,
1439 struct device_attribute
*attr
, char *buf
)
1441 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1443 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->revision_id
));
1445 static DEVICE_ATTR_RO(rev_id
);
1447 static ssize_t
device_show(struct device
*dev
,
1448 struct device_attribute
*attr
, char *buf
)
1450 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1452 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->device_id
));
1454 static DEVICE_ATTR_RO(device
);
1456 static ssize_t
subsystem_vendor_show(struct device
*dev
,
1457 struct device_attribute
*attr
, char *buf
)
1459 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1461 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_vendor_id
));
1463 static DEVICE_ATTR_RO(subsystem_vendor
);
1465 static ssize_t
subsystem_rev_id_show(struct device
*dev
,
1466 struct device_attribute
*attr
, char *buf
)
1468 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1470 return sprintf(buf
, "0x%04x\n",
1471 be16_to_cpu(dcr
->subsystem_revision_id
));
1473 static DEVICE_ATTR_RO(subsystem_rev_id
);
1475 static ssize_t
subsystem_device_show(struct device
*dev
,
1476 struct device_attribute
*attr
, char *buf
)
1478 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1480 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_device_id
));
1482 static DEVICE_ATTR_RO(subsystem_device
);
1484 static int num_nvdimm_formats(struct nvdimm
*nvdimm
)
1486 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1489 if (nfit_mem
->memdev_pmem
)
1491 if (nfit_mem
->memdev_bdw
)
1496 static ssize_t
format_show(struct device
*dev
,
1497 struct device_attribute
*attr
, char *buf
)
1499 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1501 return sprintf(buf
, "0x%04x\n", le16_to_cpu(dcr
->code
));
1503 static DEVICE_ATTR_RO(format
);
1505 static ssize_t
format1_show(struct device
*dev
,
1506 struct device_attribute
*attr
, char *buf
)
1509 ssize_t rc
= -ENXIO
;
1510 struct nfit_mem
*nfit_mem
;
1511 struct nfit_memdev
*nfit_memdev
;
1512 struct acpi_nfit_desc
*acpi_desc
;
1513 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1514 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1516 nfit_mem
= nvdimm_provider_data(nvdimm
);
1517 acpi_desc
= nfit_mem
->acpi_desc
;
1518 handle
= to_nfit_memdev(dev
)->device_handle
;
1520 /* assumes DIMMs have at most 2 published interface codes */
1521 mutex_lock(&acpi_desc
->init_mutex
);
1522 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1523 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
1524 struct nfit_dcr
*nfit_dcr
;
1526 if (memdev
->device_handle
!= handle
)
1529 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1530 if (nfit_dcr
->dcr
->region_index
!= memdev
->region_index
)
1532 if (nfit_dcr
->dcr
->code
== dcr
->code
)
1534 rc
= sprintf(buf
, "0x%04x\n",
1535 le16_to_cpu(nfit_dcr
->dcr
->code
));
1541 mutex_unlock(&acpi_desc
->init_mutex
);
1544 static DEVICE_ATTR_RO(format1
);
1546 static ssize_t
formats_show(struct device
*dev
,
1547 struct device_attribute
*attr
, char *buf
)
1549 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1551 return sprintf(buf
, "%d\n", num_nvdimm_formats(nvdimm
));
1553 static DEVICE_ATTR_RO(formats
);
1555 static ssize_t
serial_show(struct device
*dev
,
1556 struct device_attribute
*attr
, char *buf
)
1558 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1560 return sprintf(buf
, "0x%08x\n", be32_to_cpu(dcr
->serial_number
));
1562 static DEVICE_ATTR_RO(serial
);
1564 static ssize_t
family_show(struct device
*dev
,
1565 struct device_attribute
*attr
, char *buf
)
1567 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1568 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1570 if (nfit_mem
->family
< 0)
1572 return sprintf(buf
, "%d\n", nfit_mem
->family
);
1574 static DEVICE_ATTR_RO(family
);
1576 static ssize_t
dsm_mask_show(struct device
*dev
,
1577 struct device_attribute
*attr
, char *buf
)
1579 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1580 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1582 if (nfit_mem
->family
< 0)
1584 return sprintf(buf
, "%#lx\n", nfit_mem
->dsm_mask
);
1586 static DEVICE_ATTR_RO(dsm_mask
);
1588 static ssize_t
flags_show(struct device
*dev
,
1589 struct device_attribute
*attr
, char *buf
)
1591 u16 flags
= to_nfit_memdev(dev
)->flags
;
1593 return sprintf(buf
, "%s%s%s%s%s%s%s\n",
1594 flags
& ACPI_NFIT_MEM_SAVE_FAILED
? "save_fail " : "",
1595 flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? "restore_fail " : "",
1596 flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? "flush_fail " : "",
1597 flags
& ACPI_NFIT_MEM_NOT_ARMED
? "not_armed " : "",
1598 flags
& ACPI_NFIT_MEM_HEALTH_OBSERVED
? "smart_event " : "",
1599 flags
& ACPI_NFIT_MEM_MAP_FAILED
? "map_fail " : "",
1600 flags
& ACPI_NFIT_MEM_HEALTH_ENABLED
? "smart_notify " : "");
1602 static DEVICE_ATTR_RO(flags
);
1604 static ssize_t
id_show(struct device
*dev
,
1605 struct device_attribute
*attr
, char *buf
)
1607 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1609 if (dcr
->valid_fields
& ACPI_NFIT_CONTROL_MFG_INFO_VALID
)
1610 return sprintf(buf
, "%04x-%02x-%04x-%08x\n",
1611 be16_to_cpu(dcr
->vendor_id
),
1612 dcr
->manufacturing_location
,
1613 be16_to_cpu(dcr
->manufacturing_date
),
1614 be32_to_cpu(dcr
->serial_number
));
1616 return sprintf(buf
, "%04x-%08x\n",
1617 be16_to_cpu(dcr
->vendor_id
),
1618 be32_to_cpu(dcr
->serial_number
));
1620 static DEVICE_ATTR_RO(id
);
1622 static struct attribute
*acpi_nfit_dimm_attributes
[] = {
1623 &dev_attr_handle
.attr
,
1624 &dev_attr_phys_id
.attr
,
1625 &dev_attr_vendor
.attr
,
1626 &dev_attr_device
.attr
,
1627 &dev_attr_rev_id
.attr
,
1628 &dev_attr_subsystem_vendor
.attr
,
1629 &dev_attr_subsystem_device
.attr
,
1630 &dev_attr_subsystem_rev_id
.attr
,
1631 &dev_attr_format
.attr
,
1632 &dev_attr_formats
.attr
,
1633 &dev_attr_format1
.attr
,
1634 &dev_attr_serial
.attr
,
1635 &dev_attr_flags
.attr
,
1637 &dev_attr_family
.attr
,
1638 &dev_attr_dsm_mask
.attr
,
1642 static umode_t
acpi_nfit_dimm_attr_visible(struct kobject
*kobj
,
1643 struct attribute
*a
, int n
)
1645 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1646 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1648 if (!to_nfit_dcr(dev
)) {
1649 /* Without a dcr only the memdev attributes can be surfaced */
1650 if (a
== &dev_attr_handle
.attr
|| a
== &dev_attr_phys_id
.attr
1651 || a
== &dev_attr_flags
.attr
1652 || a
== &dev_attr_family
.attr
1653 || a
== &dev_attr_dsm_mask
.attr
)
1658 if (a
== &dev_attr_format1
.attr
&& num_nvdimm_formats(nvdimm
) <= 1)
1663 static const struct attribute_group acpi_nfit_dimm_attribute_group
= {
1665 .attrs
= acpi_nfit_dimm_attributes
,
1666 .is_visible
= acpi_nfit_dimm_attr_visible
,
1669 static const struct attribute_group
*acpi_nfit_dimm_attribute_groups
[] = {
1670 &nvdimm_attribute_group
,
1671 &nd_device_attribute_group
,
1672 &acpi_nfit_dimm_attribute_group
,
1676 static struct nvdimm
*acpi_nfit_dimm_by_handle(struct acpi_nfit_desc
*acpi_desc
,
1679 struct nfit_mem
*nfit_mem
;
1681 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1682 if (__to_nfit_memdev(nfit_mem
)->device_handle
== device_handle
)
1683 return nfit_mem
->nvdimm
;
1688 void __acpi_nvdimm_notify(struct device
*dev
, u32 event
)
1690 struct nfit_mem
*nfit_mem
;
1691 struct acpi_nfit_desc
*acpi_desc
;
1693 dev_dbg(dev
->parent
, "%s: event: %d\n", dev_name(dev
),
1696 if (event
!= NFIT_NOTIFY_DIMM_HEALTH
) {
1697 dev_dbg(dev
->parent
, "%s: unknown event: %d\n", dev_name(dev
),
1702 acpi_desc
= dev_get_drvdata(dev
->parent
);
1707 * If we successfully retrieved acpi_desc, then we know nfit_mem data
1710 nfit_mem
= dev_get_drvdata(dev
);
1711 if (nfit_mem
&& nfit_mem
->flags_attr
)
1712 sysfs_notify_dirent(nfit_mem
->flags_attr
);
1714 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify
);
1716 static void acpi_nvdimm_notify(acpi_handle handle
, u32 event
, void *data
)
1718 struct acpi_device
*adev
= data
;
1719 struct device
*dev
= &adev
->dev
;
1721 device_lock(dev
->parent
);
1722 __acpi_nvdimm_notify(dev
, event
);
1723 device_unlock(dev
->parent
);
1726 static bool acpi_nvdimm_has_method(struct acpi_device
*adev
, char *method
)
1731 status
= acpi_get_handle(adev
->handle
, method
, &handle
);
1733 if (ACPI_SUCCESS(status
))
1738 static int acpi_nfit_add_dimm(struct acpi_nfit_desc
*acpi_desc
,
1739 struct nfit_mem
*nfit_mem
, u32 device_handle
)
1741 struct acpi_device
*adev
, *adev_dimm
;
1742 struct device
*dev
= acpi_desc
->dev
;
1743 unsigned long dsm_mask
, label_mask
;
1748 /* nfit test assumes 1:1 relationship between commands and dsms */
1749 nfit_mem
->dsm_mask
= acpi_desc
->dimm_cmd_force_en
;
1750 nfit_mem
->family
= NVDIMM_FAMILY_INTEL
;
1751 adev
= to_acpi_dev(acpi_desc
);
1755 adev_dimm
= acpi_find_child_device(adev
, device_handle
, false);
1756 nfit_mem
->adev
= adev_dimm
;
1758 dev_err(dev
, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1760 return force_enable_dimms
? 0 : -ENODEV
;
1763 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm
->handle
,
1764 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
, adev_dimm
))) {
1765 dev_err(dev
, "%s: notification registration failed\n",
1766 dev_name(&adev_dimm
->dev
));
1770 * Record nfit_mem for the notification path to track back to
1771 * the nfit sysfs attributes for this dimm device object.
1773 dev_set_drvdata(&adev_dimm
->dev
, nfit_mem
);
1776 * Until standardization materializes we need to consider 4
1777 * different command sets. Note, that checking for function0 (bit0)
1778 * tells us if any commands are reachable through this GUID.
1780 for (i
= 0; i
<= NVDIMM_FAMILY_MAX
; i
++)
1781 if (acpi_check_dsm(adev_dimm
->handle
, to_nfit_uuid(i
), 1, 1))
1782 if (family
< 0 || i
== default_dsm_family
)
1785 /* limit the supported commands to those that are publicly documented */
1786 nfit_mem
->family
= family
;
1787 if (override_dsm_mask
&& !disable_vendor_specific
)
1788 dsm_mask
= override_dsm_mask
;
1789 else if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1790 dsm_mask
= NVDIMM_INTEL_CMDMASK
;
1791 if (disable_vendor_specific
)
1792 dsm_mask
&= ~(1 << ND_CMD_VENDOR
);
1793 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE1
) {
1794 dsm_mask
= 0x1c3c76;
1795 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE2
) {
1797 if (disable_vendor_specific
)
1798 dsm_mask
&= ~(1 << 8);
1799 } else if (nfit_mem
->family
== NVDIMM_FAMILY_MSFT
) {
1800 dsm_mask
= 0xffffffff;
1802 dev_dbg(dev
, "unknown dimm command family\n");
1803 nfit_mem
->family
= -1;
1804 /* DSMs are optional, continue loading the driver... */
1809 * Function 0 is the command interrogation function, don't
1810 * export it to potential userspace use, and enable it to be
1811 * used as an error value in acpi_nfit_ctl().
1815 guid
= to_nfit_uuid(nfit_mem
->family
);
1816 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1817 if (acpi_check_dsm(adev_dimm
->handle
, guid
,
1818 nfit_dsm_revid(nfit_mem
->family
, i
),
1820 set_bit(i
, &nfit_mem
->dsm_mask
);
1823 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1824 * due to their better semantics handling locked capacity.
1826 label_mask
= 1 << ND_CMD_GET_CONFIG_SIZE
| 1 << ND_CMD_GET_CONFIG_DATA
1827 | 1 << ND_CMD_SET_CONFIG_DATA
;
1828 if (family
== NVDIMM_FAMILY_INTEL
1829 && (dsm_mask
& label_mask
) == label_mask
)
1832 if (acpi_nvdimm_has_method(adev_dimm
, "_LSI")
1833 && acpi_nvdimm_has_method(adev_dimm
, "_LSR")) {
1834 dev_dbg(dev
, "%s: has _LSR\n", dev_name(&adev_dimm
->dev
));
1835 nfit_mem
->has_lsr
= true;
1838 if (nfit_mem
->has_lsr
&& acpi_nvdimm_has_method(adev_dimm
, "_LSW")) {
1839 dev_dbg(dev
, "%s: has _LSW\n", dev_name(&adev_dimm
->dev
));
1840 nfit_mem
->has_lsw
= true;
1846 static void shutdown_dimm_notify(void *data
)
1848 struct acpi_nfit_desc
*acpi_desc
= data
;
1849 struct nfit_mem
*nfit_mem
;
1851 mutex_lock(&acpi_desc
->init_mutex
);
1853 * Clear out the nfit_mem->flags_attr and shut down dimm event
1856 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1857 struct acpi_device
*adev_dimm
= nfit_mem
->adev
;
1859 if (nfit_mem
->flags_attr
) {
1860 sysfs_put(nfit_mem
->flags_attr
);
1861 nfit_mem
->flags_attr
= NULL
;
1864 acpi_remove_notify_handler(adev_dimm
->handle
,
1865 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
);
1866 dev_set_drvdata(&adev_dimm
->dev
, NULL
);
1869 mutex_unlock(&acpi_desc
->init_mutex
);
1872 static int acpi_nfit_register_dimms(struct acpi_nfit_desc
*acpi_desc
)
1874 struct nfit_mem
*nfit_mem
;
1875 int dimm_count
= 0, rc
;
1876 struct nvdimm
*nvdimm
;
1878 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1879 struct acpi_nfit_flush_address
*flush
;
1880 unsigned long flags
= 0, cmd_mask
;
1881 struct nfit_memdev
*nfit_memdev
;
1885 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
1886 nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
, device_handle
);
1892 if (nfit_mem
->bdw
&& nfit_mem
->memdev_pmem
)
1893 set_bit(NDD_ALIASING
, &flags
);
1895 /* collate flags across all memdevs for this dimm */
1896 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1897 struct acpi_nfit_memory_map
*dimm_memdev
;
1899 dimm_memdev
= __to_nfit_memdev(nfit_mem
);
1900 if (dimm_memdev
->device_handle
1901 != nfit_memdev
->memdev
->device_handle
)
1903 dimm_memdev
->flags
|= nfit_memdev
->memdev
->flags
;
1906 mem_flags
= __to_nfit_memdev(nfit_mem
)->flags
;
1907 if (mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
)
1908 set_bit(NDD_UNARMED
, &flags
);
1910 rc
= acpi_nfit_add_dimm(acpi_desc
, nfit_mem
, device_handle
);
1915 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1916 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1917 * userspace interface.
1919 cmd_mask
= 1UL << ND_CMD_CALL
;
1920 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1922 * These commands have a 1:1 correspondence
1923 * between DSM payload and libnvdimm ioctl
1926 cmd_mask
|= nfit_mem
->dsm_mask
& NVDIMM_STANDARD_CMDMASK
;
1929 if (nfit_mem
->has_lsr
) {
1930 set_bit(ND_CMD_GET_CONFIG_SIZE
, &cmd_mask
);
1931 set_bit(ND_CMD_GET_CONFIG_DATA
, &cmd_mask
);
1933 if (nfit_mem
->has_lsw
)
1934 set_bit(ND_CMD_SET_CONFIG_DATA
, &cmd_mask
);
1936 flush
= nfit_mem
->nfit_flush
? nfit_mem
->nfit_flush
->flush
1938 nvdimm
= nvdimm_create(acpi_desc
->nvdimm_bus
, nfit_mem
,
1939 acpi_nfit_dimm_attribute_groups
,
1940 flags
, cmd_mask
, flush
? flush
->hint_count
: 0,
1941 nfit_mem
->flush_wpq
);
1945 nfit_mem
->nvdimm
= nvdimm
;
1948 if ((mem_flags
& ACPI_NFIT_MEM_FAILED_MASK
) == 0)
1951 dev_info(acpi_desc
->dev
, "%s flags:%s%s%s%s%s\n",
1952 nvdimm_name(nvdimm
),
1953 mem_flags
& ACPI_NFIT_MEM_SAVE_FAILED
? " save_fail" : "",
1954 mem_flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? " restore_fail":"",
1955 mem_flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? " flush_fail" : "",
1956 mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
? " not_armed" : "",
1957 mem_flags
& ACPI_NFIT_MEM_MAP_FAILED
? " map_fail" : "");
1961 rc
= nvdimm_bus_check_dimm_count(acpi_desc
->nvdimm_bus
, dimm_count
);
1966 * Now that dimms are successfully registered, and async registration
1967 * is flushed, attempt to enable event notification.
1969 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1970 struct kernfs_node
*nfit_kernfs
;
1972 nvdimm
= nfit_mem
->nvdimm
;
1976 nfit_kernfs
= sysfs_get_dirent(nvdimm_kobj(nvdimm
)->sd
, "nfit");
1978 nfit_mem
->flags_attr
= sysfs_get_dirent(nfit_kernfs
,
1980 sysfs_put(nfit_kernfs
);
1981 if (!nfit_mem
->flags_attr
)
1982 dev_warn(acpi_desc
->dev
, "%s: notifications disabled\n",
1983 nvdimm_name(nvdimm
));
1986 return devm_add_action_or_reset(acpi_desc
->dev
, shutdown_dimm_notify
,
1991 * These constants are private because there are no kernel consumers of
1994 enum nfit_aux_cmds
{
1995 NFIT_CMD_TRANSLATE_SPA
= 5,
1996 NFIT_CMD_ARS_INJECT_SET
= 7,
1997 NFIT_CMD_ARS_INJECT_CLEAR
= 8,
1998 NFIT_CMD_ARS_INJECT_GET
= 9,
2001 static void acpi_nfit_init_dsms(struct acpi_nfit_desc
*acpi_desc
)
2003 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
2004 const guid_t
*guid
= to_nfit_uuid(NFIT_DEV_BUS
);
2005 struct acpi_device
*adev
;
2006 unsigned long dsm_mask
;
2009 nd_desc
->cmd_mask
= acpi_desc
->bus_cmd_force_en
;
2010 nd_desc
->bus_dsm_mask
= acpi_desc
->bus_nfit_cmd_force_en
;
2011 adev
= to_acpi_dev(acpi_desc
);
2015 for (i
= ND_CMD_ARS_CAP
; i
<= ND_CMD_CLEAR_ERROR
; i
++)
2016 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
2017 set_bit(i
, &nd_desc
->cmd_mask
);
2018 set_bit(ND_CMD_CALL
, &nd_desc
->cmd_mask
);
2021 (1 << ND_CMD_ARS_CAP
) |
2022 (1 << ND_CMD_ARS_START
) |
2023 (1 << ND_CMD_ARS_STATUS
) |
2024 (1 << ND_CMD_CLEAR_ERROR
) |
2025 (1 << NFIT_CMD_TRANSLATE_SPA
) |
2026 (1 << NFIT_CMD_ARS_INJECT_SET
) |
2027 (1 << NFIT_CMD_ARS_INJECT_CLEAR
) |
2028 (1 << NFIT_CMD_ARS_INJECT_GET
);
2029 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
2030 if (acpi_check_dsm(adev
->handle
, guid
, 1, 1ULL << i
))
2031 set_bit(i
, &nd_desc
->bus_dsm_mask
);
2034 static ssize_t
range_index_show(struct device
*dev
,
2035 struct device_attribute
*attr
, char *buf
)
2037 struct nd_region
*nd_region
= to_nd_region(dev
);
2038 struct nfit_spa
*nfit_spa
= nd_region_provider_data(nd_region
);
2040 return sprintf(buf
, "%d\n", nfit_spa
->spa
->range_index
);
2042 static DEVICE_ATTR_RO(range_index
);
2044 static struct attribute
*acpi_nfit_region_attributes
[] = {
2045 &dev_attr_range_index
.attr
,
2049 static const struct attribute_group acpi_nfit_region_attribute_group
= {
2051 .attrs
= acpi_nfit_region_attributes
,
2054 static const struct attribute_group
*acpi_nfit_region_attribute_groups
[] = {
2055 &nd_region_attribute_group
,
2056 &nd_mapping_attribute_group
,
2057 &nd_device_attribute_group
,
2058 &nd_numa_attribute_group
,
2059 &acpi_nfit_region_attribute_group
,
2063 /* enough info to uniquely specify an interleave set */
2064 struct nfit_set_info
{
2065 struct nfit_set_info_map
{
2072 struct nfit_set_info2
{
2073 struct nfit_set_info_map2
{
2077 u16 manufacturing_date
;
2078 u8 manufacturing_location
;
2083 static size_t sizeof_nfit_set_info(int num_mappings
)
2085 return sizeof(struct nfit_set_info
)
2086 + num_mappings
* sizeof(struct nfit_set_info_map
);
2089 static size_t sizeof_nfit_set_info2(int num_mappings
)
2091 return sizeof(struct nfit_set_info2
)
2092 + num_mappings
* sizeof(struct nfit_set_info_map2
);
2095 static int cmp_map_compat(const void *m0
, const void *m1
)
2097 const struct nfit_set_info_map
*map0
= m0
;
2098 const struct nfit_set_info_map
*map1
= m1
;
2100 return memcmp(&map0
->region_offset
, &map1
->region_offset
,
2104 static int cmp_map(const void *m0
, const void *m1
)
2106 const struct nfit_set_info_map
*map0
= m0
;
2107 const struct nfit_set_info_map
*map1
= m1
;
2109 if (map0
->region_offset
< map1
->region_offset
)
2111 else if (map0
->region_offset
> map1
->region_offset
)
2116 static int cmp_map2(const void *m0
, const void *m1
)
2118 const struct nfit_set_info_map2
*map0
= m0
;
2119 const struct nfit_set_info_map2
*map1
= m1
;
2121 if (map0
->region_offset
< map1
->region_offset
)
2123 else if (map0
->region_offset
> map1
->region_offset
)
2128 /* Retrieve the nth entry referencing this spa */
2129 static struct acpi_nfit_memory_map
*memdev_from_spa(
2130 struct acpi_nfit_desc
*acpi_desc
, u16 range_index
, int n
)
2132 struct nfit_memdev
*nfit_memdev
;
2134 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
)
2135 if (nfit_memdev
->memdev
->range_index
== range_index
)
2137 return nfit_memdev
->memdev
;
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	ndr_desc->nd_set = nd_set;
	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
					== dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}
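/*
 * to_interleave_offset() translates a register/aperture offset into the
 * interleaved system-physical window:
 *
 *   line_no          = offset / line_size   (remainder sub_line_offset)
 *   table_skip_count = line_no / num_lines  (remainder line_index)
 *   result = base_offset + line_offset[line_index] * line_size
 *            + table_skip_count * table_size + sub_line_offset
 *
 * Illustrative example (hypothetical values): with line_size = 256,
 * num_lines = 2 and table_size = 4096, offset 0x1234 yields line_no 18,
 * sub_line_offset 0x34, line_index 0 and table_skip_count 9, i.e. the
 * access lands nine interleave tables in, at this DIMM's first line slot.
 */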
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
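/*
 * write_blk_ctl() programs the block command window (BCW) register for
 * one block window (bw): the DPA in cache-line units occupies the low
 * bits, the transfer length (also in cache lines) sits behind
 * BCW_LEN_SHIFT, and the read/write flag behind BCW_CMD_SHIFT.  The
 * register itself may be interleaved, hence the to_interleave_offset()
 * translation before the writeq().
 */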
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48) - 1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
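/*
 * Block-window I/O is serialized per lane: the caller's buffer is split
 * into chunks of at most one aperture (mmio->size) and each chunk is
 * issued via acpi_nfit_blk_single_io() while the lane is held.
 */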
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
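/*
 * Address Range Scrub (ARS) is driven through bus-level DSMs:
 * ND_CMD_ARS_CAP (ars_get_cap() above) reports the status payload size
 * for a range, ND_CMD_ARS_START starts or restarts a scrub, and
 * ND_CMD_ARS_STATUS retrieves progress and media-error records.  The
 * helpers below wrap those calls for the scrub state machine.
 */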
static int ars_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (req_type == ARS_REQ_SHORT)
		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
			/* complete */;
		else
			return;
	} else
		return;

	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;
	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;

	/*
	 * Ignore potentially stale results that are only refreshed
	 * after a start-ARS event.
	 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
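/*
 * Assemble a libnvdimm region for one SPA range: collect every memdev
 * that references the range into a mapping array, derive the
 * interleave-set cookies, and register the result as a pmem, volatile,
 * or virtual-disk region depending on the range GUID.
 */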
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
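/*
 * The ars_status payload is sized to the largest max_ars_out reported
 * by any range (acpi_desc->max_ars, see acpi_nfit_init_ars()) and is
 * reused, zeroed, across queries.
 */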
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status) {
		memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
		return 0;
	}

	ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
{
	int rc;

	if (ars_status_alloc(acpi_desc))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);

	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc))
		dev_err(acpi_desc->dev, "Failed to process ARS records\n");

	return rc;
}
static int ars_register(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc;

	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);

	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
	if (!no_init_ars)
		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);

	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
		if (rc == -EBUSY)
			break;
		if (rc) {
			set_bit(ARS_FAILED, &nfit_spa->ars_state);
			break;
		}
		clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
		rc = acpi_nfit_query_poison(acpi_desc);
		if (rc)
			break;
		acpi_desc->scrub_spa = nfit_spa;
		ars_complete(acpi_desc, nfit_spa);
		/*
		 * If ars_complete() says we didn't complete the
		 * short scrub, we'll try again with a long
		 * request.
		 */
		acpi_desc->scrub_spa = NULL;
		break;
	case -EBUSY:
	case -ENOMEM:
	case -ENOSPC:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
		 * own scrubs.
		 */
		break;
	default:
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
		break;
	}

	return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;
		ars_complete(acpi_desc, nfit_spa);
	}
}
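/*
 * __acpi_nfit_scrub() is the core of the scrub state machine.  It runs
 * from the workqueue with init_mutex held and the result of the latest
 * status query; it returns the number of seconds after which the work
 * should be rescheduled, or 0 once all requested scrubs have completed.
 */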
static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
		int query_rc)
{
	unsigned int tmo = acpi_desc->scrub_tmo;
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	lockdep_assert_held(&acpi_desc->init_mutex);

	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;

	if (query_rc == -EBUSY) {
		dev_dbg(dev, "ARS: ARS busy\n");
		return min(30U * 60U, tmo * 2);
	}
	if (query_rc == -ENOSPC) {
		dev_dbg(dev, "ARS: ARS continue\n");
		ars_continue(acpi_desc);
		return 1;
	}
	if (query_rc && query_rc != -EAGAIN) {
		unsigned long long addr, end;

		addr = acpi_desc->ars_status->address;
		end = addr + acpi_desc->ars_status->length;
		dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
				query_rc);
	}

	ars_complete_all(acpi_desc);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		enum nfit_ars_state req_type;
		int rc;

		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		/* prefer short ARS requests first */
		if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
			req_type = ARS_REQ_SHORT;
		else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
			req_type = ARS_REQ_LONG;
		else
			continue;
		rc = ars_start(acpi_desc, nfit_spa, req_type);

		dev = nd_region_dev(nfit_spa->nd_region);
		dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
				nfit_spa->spa->range_index,
				req_type == ARS_REQ_SHORT ? "short" : "long",
				rc);
		/*
		 * Hmm, we raced someone else starting ARS? Try again in
		 * a bit.
		 */
		if (rc == -EBUSY)
			return 1;
		if (rc == 0) {
			dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
					"scrub start while range %d active\n",
					acpi_desc->scrub_spa->spa->range_index);
			clear_bit(req_type, &nfit_spa->ars_state);
			acpi_desc->scrub_spa = nfit_spa;
			/*
			 * Consider this spa last for future scrub
			 * requests
			 */
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return 1;
		}

		dev_err(dev, "ARS: range %d ARS failed (%d)\n",
				nfit_spa->spa->range_index, rc);
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
	}
	return 0;
}
static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	/* note this should only be set from within the workqueue */
	if (tmo)
		acpi_desc->scrub_tmo = tmo;
	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
}

static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}
static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
}
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo;
	int query_rc;

	acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
	mutex_lock(&acpi_desc->init_mutex);
	query_rc = acpi_nfit_query_poison(acpi_desc);
	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
	if (tmo)
		__sched_ars(acpi_desc, tmo);
	else
		notify_ars_done(acpi_desc);
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
}
static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int type = nfit_spa_type(nfit_spa->spa);
	struct nd_cmd_ars_cap ars_cap;
	int rc;

	set_bit(ARS_FAILED, &nfit_spa->ars_state);
	memset(&ars_cap, 0, sizeof(ars_cap));
	rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
	if (rc < 0)
		return;
	/* check that the supported scrub types match the spa type */
	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
				& ND_ARS_VOLATILE) == 0)
		return;
	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
				& ND_ARS_PERSISTENT) == 0)
		return;

	nfit_spa->max_ars = ars_cap.max_ars_out;
	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}

	sched_ars(acpi_desc);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}
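/*
 * acpi_nfit_init() parses an NFIT (or _FIT) image.  When re-invoked for
 * an updated table, the currently known sub-tables are first moved to
 * the 'prev' lists via list_cut_position(); add_table() then matches
 * incoming entries against 'prev' or adds them as new, and
 * acpi_nfit_check_deletions() rejects updates that drop entries.
 */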
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ARS it initiates is not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->dwork.work))
		return -EBUSY;

	return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
		enum nfit_ars_state req_type)
{
	struct device *dev = acpi_desc->dev;
	int scheduled = 0, busy = 0;
	struct nfit_spa *nfit_spa;

	mutex_lock(&acpi_desc->init_mutex);
	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int type = nfit_spa_type(nfit_spa->spa);

		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
			continue;
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
			busy++;
		else
			scheduled++;
	}
	if (scheduled) {
		sched_ars(acpi_desc);
		dev_dbg(dev, "ars_scan triggered\n");
	}
	mutex_unlock(&acpi_desc->init_mutex);

	if (scheduled)
		return 0;
	if (busy)
		return -EBUSY;
	return -ENOTTY;
}
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	acpi_desc->scrub_tmo = 1;
	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown.
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
					(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
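/*
 * The BUILD_BUG_ON() checks below pin the ACPICA structure layouts to
 * the sizes expected by this driver; a mismatch indicates that the
 * local acpi_nfit_* definitions have drifted from the NFIT definition
 * rather than a firmware problem.
 */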
static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");