/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
97 static int xlat_bus_status(void *buf
, unsigned int cmd
, u32 status
)
99 struct nd_cmd_clear_error
*clear_err
;
100 struct nd_cmd_ars_status
*ars_status
;
105 if ((status
& 0xffff) == NFIT_ARS_CAP_NONE
)
112 /* No supported scan types for this range */
113 flags
= ND_ARS_PERSISTENT
| ND_ARS_VOLATILE
;
114 if ((status
>> 16 & flags
) == 0)
117 case ND_CMD_ARS_START
:
118 /* ARS is in progress */
119 if ((status
& 0xffff) == NFIT_ARS_START_BUSY
)
126 case ND_CMD_ARS_STATUS
:
131 /* Check extended status (Upper two bytes) */
132 if (status
== NFIT_ARS_STATUS_DONE
)
135 /* ARS is in progress */
136 if (status
== NFIT_ARS_STATUS_BUSY
)
139 /* No ARS performed for the current boot */
140 if (status
== NFIT_ARS_STATUS_NONE
)
144 * ARS interrupted, either we overflowed or some other
145 * agent wants the scan to stop. If we didn't overflow
146 * then just continue with the returned results.
148 if (status
== NFIT_ARS_STATUS_INTR
) {
149 if (ars_status
->out_length
>= 40 && (ars_status
->flags
150 & NFIT_ARS_F_OVERFLOW
))
159 case ND_CMD_CLEAR_ERROR
:
163 if (!clear_err
->cleared
)
165 if (clear_err
->length
> clear_err
->cleared
)
166 return clear_err
->cleared
;
172 /* all other non-zero status results in an error */
178 static int xlat_status(struct nvdimm
*nvdimm
, void *buf
, unsigned int cmd
,
182 return xlat_bus_status(buf
, cmd
, status
);
188 int acpi_nfit_ctl(struct nvdimm_bus_descriptor
*nd_desc
, struct nvdimm
*nvdimm
,
189 unsigned int cmd
, void *buf
, unsigned int buf_len
, int *cmd_rc
)
191 struct acpi_nfit_desc
*acpi_desc
= to_acpi_nfit_desc(nd_desc
);
192 union acpi_object in_obj
, in_buf
, *out_obj
;
193 const struct nd_cmd_desc
*desc
= NULL
;
194 struct device
*dev
= acpi_desc
->dev
;
195 struct nd_cmd_pkg
*call_pkg
= NULL
;
196 const char *cmd_name
, *dimm_name
;
197 unsigned long cmd_mask
, dsm_mask
;
198 u32 offset
, fw_status
= 0;
205 if (cmd
== ND_CMD_CALL
) {
207 func
= call_pkg
->nd_command
;
211 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
212 struct acpi_device
*adev
= nfit_mem
->adev
;
216 if (call_pkg
&& nfit_mem
->family
!= call_pkg
->nd_family
)
219 dimm_name
= nvdimm_name(nvdimm
);
220 cmd_name
= nvdimm_cmd_name(cmd
);
221 cmd_mask
= nvdimm_cmd_mask(nvdimm
);
222 dsm_mask
= nfit_mem
->dsm_mask
;
223 desc
= nd_cmd_dimm_desc(cmd
);
224 uuid
= to_nfit_uuid(nfit_mem
->family
);
225 handle
= adev
->handle
;
227 struct acpi_device
*adev
= to_acpi_dev(acpi_desc
);
229 cmd_name
= nvdimm_bus_cmd_name(cmd
);
230 cmd_mask
= nd_desc
->cmd_mask
;
232 desc
= nd_cmd_bus_desc(cmd
);
233 uuid
= to_nfit_uuid(NFIT_DEV_BUS
);
234 handle
= adev
->handle
;
238 if (!desc
|| (cmd
&& (desc
->out_num
+ desc
->in_num
== 0)))
241 if (!test_bit(cmd
, &cmd_mask
) || !test_bit(func
, &dsm_mask
))
244 in_obj
.type
= ACPI_TYPE_PACKAGE
;
245 in_obj
.package
.count
= 1;
246 in_obj
.package
.elements
= &in_buf
;
247 in_buf
.type
= ACPI_TYPE_BUFFER
;
248 in_buf
.buffer
.pointer
= buf
;
249 in_buf
.buffer
.length
= 0;
251 /* libnvdimm has already validated the input envelope */
252 for (i
= 0; i
< desc
->in_num
; i
++)
253 in_buf
.buffer
.length
+= nd_cmd_in_size(nvdimm
, cmd
, desc
,
257 /* skip over package wrapper */
258 in_buf
.buffer
.pointer
= (void *) &call_pkg
->nd_payload
;
259 in_buf
.buffer
.length
= call_pkg
->nd_size_in
;
262 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG
)) {
263 dev_dbg(dev
, "%s:%s cmd: %d: func: %d input length: %d\n",
264 __func__
, dimm_name
, cmd
, func
,
265 in_buf
.buffer
.length
);
266 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET
, 4, 4,
267 in_buf
.buffer
.pointer
,
268 min_t(u32
, 256, in_buf
.buffer
.length
), true);
271 out_obj
= acpi_evaluate_dsm(handle
, uuid
, 1, func
, &in_obj
);
273 dev_dbg(dev
, "%s:%s _DSM failed cmd: %s\n", __func__
, dimm_name
,
279 call_pkg
->nd_fw_size
= out_obj
->buffer
.length
;
280 memcpy(call_pkg
->nd_payload
+ call_pkg
->nd_size_in
,
281 out_obj
->buffer
.pointer
,
282 min(call_pkg
->nd_fw_size
, call_pkg
->nd_size_out
));
286 * Need to support FW function w/o known size in advance.
287 * Caller can determine required size based upon nd_fw_size.
288 * If we return an error (like elsewhere) then caller wouldn't
289 * be able to rely upon data returned to make calculation.
294 if (out_obj
->package
.type
!= ACPI_TYPE_BUFFER
) {
295 dev_dbg(dev
, "%s:%s unexpected output object type cmd: %s type: %d\n",
296 __func__
, dimm_name
, cmd_name
, out_obj
->type
);
301 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG
)) {
302 dev_dbg(dev
, "%s:%s cmd: %s output length: %d\n", __func__
,
303 dimm_name
, cmd_name
, out_obj
->buffer
.length
);
304 print_hex_dump_debug(cmd_name
, DUMP_PREFIX_OFFSET
, 4,
305 4, out_obj
->buffer
.pointer
, min_t(u32
, 128,
306 out_obj
->buffer
.length
), true);
309 for (i
= 0, offset
= 0; i
< desc
->out_num
; i
++) {
310 u32 out_size
= nd_cmd_out_size(nvdimm
, cmd
, desc
, i
, buf
,
311 (u32
*) out_obj
->buffer
.pointer
,
312 out_obj
->buffer
.length
- offset
);
314 if (offset
+ out_size
> out_obj
->buffer
.length
) {
315 dev_dbg(dev
, "%s:%s output object underflow cmd: %s field: %d\n",
316 __func__
, dimm_name
, cmd_name
, i
);
320 if (in_buf
.buffer
.length
+ offset
+ out_size
> buf_len
) {
321 dev_dbg(dev
, "%s:%s output overrun cmd: %s field: %d\n",
322 __func__
, dimm_name
, cmd_name
, i
);
326 memcpy(buf
+ in_buf
.buffer
.length
+ offset
,
327 out_obj
->buffer
.pointer
+ offset
, out_size
);
332 * Set fw_status for all the commands with a known format to be
333 * later interpreted by xlat_status().
335 if (i
>= 1 && ((cmd
>= ND_CMD_ARS_CAP
&& cmd
<= ND_CMD_CLEAR_ERROR
)
336 || (cmd
>= ND_CMD_SMART
&& cmd
<= ND_CMD_VENDOR
)))
337 fw_status
= *(u32
*) out_obj
->buffer
.pointer
;
339 if (offset
+ in_buf
.buffer
.length
< buf_len
) {
342 * status valid, return the number of bytes left
343 * unfilled in the output buffer
345 rc
= buf_len
- offset
- in_buf
.buffer
.length
;
347 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
,
350 dev_err(dev
, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
351 __func__
, dimm_name
, cmd_name
, buf_len
,
358 *cmd_rc
= xlat_status(nvdimm
, buf
, cmd
, fw_status
);
366 EXPORT_SYMBOL_GPL(acpi_nfit_ctl
);
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}
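/*
 * Note on the add_* helpers in this section: an incoming table is first
 * compared against the entries on the 'prev' lists, which appear to hold
 * the objects discovered by an earlier NFIT evaluation.  On an exact
 * match the existing object is simply moved back onto the live acpi_desc
 * list instead of being re-allocated, so re-parsing the NFIT (for example
 * after a notification) only spends new devm allocations on tables that
 * are genuinely new or changed.
 */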
428 static bool add_memdev(struct acpi_nfit_desc
*acpi_desc
,
429 struct nfit_table_prev
*prev
,
430 struct acpi_nfit_memory_map
*memdev
)
432 struct device
*dev
= acpi_desc
->dev
;
433 struct nfit_memdev
*nfit_memdev
;
435 if (memdev
->header
.length
!= sizeof(*memdev
))
438 list_for_each_entry(nfit_memdev
, &prev
->memdevs
, list
)
439 if (memcmp(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
)) == 0) {
440 list_move_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
444 nfit_memdev
= devm_kzalloc(dev
, sizeof(*nfit_memdev
) + sizeof(*memdev
),
448 INIT_LIST_HEAD(&nfit_memdev
->list
);
449 memcpy(nfit_memdev
->memdev
, memdev
, sizeof(*memdev
));
450 list_add_tail(&nfit_memdev
->list
, &acpi_desc
->memdevs
);
451 dev_dbg(dev
, "%s: memdev handle: %#x spa: %d dcr: %d\n",
452 __func__
, memdev
->device_handle
, memdev
->range_index
,
453 memdev
->region_index
);
/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}
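/*
 * A control region that only describes the pmem interface may legitimately
 * stop short of the block-window fields (compare the ACPI 6.1 note in
 * nfit_mem_dcr_init() about separate pmem vs. blk control regions), which
 * is presumably why a header length down to offsetof(..., window_size) is
 * still accepted above.
 */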
471 static bool add_dcr(struct acpi_nfit_desc
*acpi_desc
,
472 struct nfit_table_prev
*prev
,
473 struct acpi_nfit_control_region
*dcr
)
475 struct device
*dev
= acpi_desc
->dev
;
476 struct nfit_dcr
*nfit_dcr
;
478 if (!sizeof_dcr(dcr
))
481 list_for_each_entry(nfit_dcr
, &prev
->dcrs
, list
)
482 if (memcmp(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
)) == 0) {
483 list_move_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
487 nfit_dcr
= devm_kzalloc(dev
, sizeof(*nfit_dcr
) + sizeof(*dcr
),
491 INIT_LIST_HEAD(&nfit_dcr
->list
);
492 memcpy(nfit_dcr
->dcr
, dcr
, sizeof_dcr(dcr
));
493 list_add_tail(&nfit_dcr
->list
, &acpi_desc
->dcrs
);
494 dev_dbg(dev
, "%s: dcr index: %d windows: %d\n", __func__
,
495 dcr
->region_index
, dcr
->windows
);
499 static bool add_bdw(struct acpi_nfit_desc
*acpi_desc
,
500 struct nfit_table_prev
*prev
,
501 struct acpi_nfit_data_region
*bdw
)
503 struct device
*dev
= acpi_desc
->dev
;
504 struct nfit_bdw
*nfit_bdw
;
506 if (bdw
->header
.length
!= sizeof(*bdw
))
508 list_for_each_entry(nfit_bdw
, &prev
->bdws
, list
)
509 if (memcmp(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
)) == 0) {
510 list_move_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
514 nfit_bdw
= devm_kzalloc(dev
, sizeof(*nfit_bdw
) + sizeof(*bdw
),
518 INIT_LIST_HEAD(&nfit_bdw
->list
);
519 memcpy(nfit_bdw
->bdw
, bdw
, sizeof(*bdw
));
520 list_add_tail(&nfit_bdw
->list
, &acpi_desc
->bdws
);
521 dev_dbg(dev
, "%s: bdw dcr: %d windows: %d\n", __func__
,
522 bdw
->region_index
, bdw
->windows
);
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}
533 static bool add_idt(struct acpi_nfit_desc
*acpi_desc
,
534 struct nfit_table_prev
*prev
,
535 struct acpi_nfit_interleave
*idt
)
537 struct device
*dev
= acpi_desc
->dev
;
538 struct nfit_idt
*nfit_idt
;
540 if (!sizeof_idt(idt
))
543 list_for_each_entry(nfit_idt
, &prev
->idts
, list
) {
544 if (sizeof_idt(nfit_idt
->idt
) != sizeof_idt(idt
))
547 if (memcmp(nfit_idt
->idt
, idt
, sizeof_idt(idt
)) == 0) {
548 list_move_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
553 nfit_idt
= devm_kzalloc(dev
, sizeof(*nfit_idt
) + sizeof_idt(idt
),
557 INIT_LIST_HEAD(&nfit_idt
->list
);
558 memcpy(nfit_idt
->idt
, idt
, sizeof_idt(idt
));
559 list_add_tail(&nfit_idt
->list
, &acpi_desc
->idts
);
560 dev_dbg(dev
, "%s: idt index: %d num_lines: %d\n", __func__
,
561 idt
->interleave_index
, idt
->line_count
);
static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}
572 static bool add_flush(struct acpi_nfit_desc
*acpi_desc
,
573 struct nfit_table_prev
*prev
,
574 struct acpi_nfit_flush_address
*flush
)
576 struct device
*dev
= acpi_desc
->dev
;
577 struct nfit_flush
*nfit_flush
;
579 if (!sizeof_flush(flush
))
582 list_for_each_entry(nfit_flush
, &prev
->flushes
, list
) {
583 if (sizeof_flush(nfit_flush
->flush
) != sizeof_flush(flush
))
586 if (memcmp(nfit_flush
->flush
, flush
,
587 sizeof_flush(flush
)) == 0) {
588 list_move_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
593 nfit_flush
= devm_kzalloc(dev
, sizeof(*nfit_flush
)
594 + sizeof_flush(flush
), GFP_KERNEL
);
597 INIT_LIST_HEAD(&nfit_flush
->list
);
598 memcpy(nfit_flush
->flush
, flush
, sizeof_flush(flush
));
599 list_add_tail(&nfit_flush
->list
, &acpi_desc
->flushes
);
600 dev_dbg(dev
, "%s: nfit_flush handle: %d hint_count: %d\n", __func__
,
601 flush
->device_handle
, flush
->hint_count
);
605 static void *add_table(struct acpi_nfit_desc
*acpi_desc
,
606 struct nfit_table_prev
*prev
, void *table
, const void *end
)
608 struct device
*dev
= acpi_desc
->dev
;
609 struct acpi_nfit_header
*hdr
;
610 void *err
= ERR_PTR(-ENOMEM
);
617 dev_warn(dev
, "found a zero length table '%d' parsing nfit\n",
623 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS
:
624 if (!add_spa(acpi_desc
, prev
, table
))
627 case ACPI_NFIT_TYPE_MEMORY_MAP
:
628 if (!add_memdev(acpi_desc
, prev
, table
))
631 case ACPI_NFIT_TYPE_CONTROL_REGION
:
632 if (!add_dcr(acpi_desc
, prev
, table
))
635 case ACPI_NFIT_TYPE_DATA_REGION
:
636 if (!add_bdw(acpi_desc
, prev
, table
))
639 case ACPI_NFIT_TYPE_INTERLEAVE
:
640 if (!add_idt(acpi_desc
, prev
, table
))
643 case ACPI_NFIT_TYPE_FLUSH_ADDRESS
:
644 if (!add_flush(acpi_desc
, prev
, table
))
647 case ACPI_NFIT_TYPE_SMBIOS
:
648 dev_dbg(dev
, "%s: smbios\n", __func__
);
651 dev_err(dev
, "unknown table '%d' parsing nfit\n", hdr
->type
);
655 return table
+ hdr
->length
;
658 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc
*acpi_desc
,
659 struct nfit_mem
*nfit_mem
)
661 u32 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
662 u16 dcr
= nfit_mem
->dcr
->region_index
;
663 struct nfit_spa
*nfit_spa
;
665 list_for_each_entry(nfit_spa
, &acpi_desc
->spas
, list
) {
666 u16 range_index
= nfit_spa
->spa
->range_index
;
667 int type
= nfit_spa_type(nfit_spa
->spa
);
668 struct nfit_memdev
*nfit_memdev
;
670 if (type
!= NFIT_SPA_BDW
)
673 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
674 if (nfit_memdev
->memdev
->range_index
!= range_index
)
676 if (nfit_memdev
->memdev
->device_handle
!= device_handle
)
678 if (nfit_memdev
->memdev
->region_index
!= dcr
)
681 nfit_mem
->spa_bdw
= nfit_spa
->spa
;
686 dev_dbg(acpi_desc
->dev
, "SPA-BDW not found for SPA-DCR %d\n",
687 nfit_mem
->spa_dcr
->range_index
);
688 nfit_mem
->bdw
= NULL
;
691 static void nfit_mem_init_bdw(struct acpi_nfit_desc
*acpi_desc
,
692 struct nfit_mem
*nfit_mem
, struct acpi_nfit_system_address
*spa
)
694 u16 dcr
= __to_nfit_memdev(nfit_mem
)->region_index
;
695 struct nfit_memdev
*nfit_memdev
;
696 struct nfit_bdw
*nfit_bdw
;
697 struct nfit_idt
*nfit_idt
;
698 u16 idt_idx
, range_index
;
700 list_for_each_entry(nfit_bdw
, &acpi_desc
->bdws
, list
) {
701 if (nfit_bdw
->bdw
->region_index
!= dcr
)
703 nfit_mem
->bdw
= nfit_bdw
->bdw
;
710 nfit_mem_find_spa_bdw(acpi_desc
, nfit_mem
);
712 if (!nfit_mem
->spa_bdw
)
715 range_index
= nfit_mem
->spa_bdw
->range_index
;
716 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
717 if (nfit_memdev
->memdev
->range_index
!= range_index
||
718 nfit_memdev
->memdev
->region_index
!= dcr
)
720 nfit_mem
->memdev_bdw
= nfit_memdev
->memdev
;
721 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
722 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
723 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
725 nfit_mem
->idt_bdw
= nfit_idt
->idt
;
732 static int nfit_mem_dcr_init(struct acpi_nfit_desc
*acpi_desc
,
733 struct acpi_nfit_system_address
*spa
)
735 struct nfit_mem
*nfit_mem
, *found
;
736 struct nfit_memdev
*nfit_memdev
;
737 int type
= nfit_spa_type(spa
);
747 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
748 struct nfit_flush
*nfit_flush
;
749 struct nfit_dcr
*nfit_dcr
;
753 if (nfit_memdev
->memdev
->range_index
!= spa
->range_index
)
756 dcr
= nfit_memdev
->memdev
->region_index
;
757 device_handle
= nfit_memdev
->memdev
->device_handle
;
758 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
759 if (__to_nfit_memdev(nfit_mem
)->device_handle
768 nfit_mem
= devm_kzalloc(acpi_desc
->dev
,
769 sizeof(*nfit_mem
), GFP_KERNEL
);
772 INIT_LIST_HEAD(&nfit_mem
->list
);
773 nfit_mem
->acpi_desc
= acpi_desc
;
774 list_add(&nfit_mem
->list
, &acpi_desc
->dimms
);
777 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
778 if (nfit_dcr
->dcr
->region_index
!= dcr
)
781 * Record the control region for the dimm. For
782 * the ACPI 6.1 case, where there are separate
783 * control regions for the pmem vs blk
784 * interfaces, be sure to record the extended
788 nfit_mem
->dcr
= nfit_dcr
->dcr
;
789 else if (nfit_mem
->dcr
->windows
== 0
790 && nfit_dcr
->dcr
->windows
)
791 nfit_mem
->dcr
= nfit_dcr
->dcr
;
795 list_for_each_entry(nfit_flush
, &acpi_desc
->flushes
, list
) {
796 struct acpi_nfit_flush_address
*flush
;
799 if (nfit_flush
->flush
->device_handle
!= device_handle
)
801 nfit_mem
->nfit_flush
= nfit_flush
;
802 flush
= nfit_flush
->flush
;
803 nfit_mem
->flush_wpq
= devm_kzalloc(acpi_desc
->dev
,
805 * sizeof(struct resource
), GFP_KERNEL
);
806 if (!nfit_mem
->flush_wpq
)
808 for (i
= 0; i
< flush
->hint_count
; i
++) {
809 struct resource
*res
= &nfit_mem
->flush_wpq
[i
];
811 res
->start
= flush
->hint_address
[i
];
812 res
->end
= res
->start
+ 8 - 1;
817 if (dcr
&& !nfit_mem
->dcr
) {
818 dev_err(acpi_desc
->dev
, "SPA %d missing DCR %d\n",
819 spa
->range_index
, dcr
);
823 if (type
== NFIT_SPA_DCR
) {
824 struct nfit_idt
*nfit_idt
;
827 /* multiple dimms may share a SPA when interleaved */
828 nfit_mem
->spa_dcr
= spa
;
829 nfit_mem
->memdev_dcr
= nfit_memdev
->memdev
;
830 idt_idx
= nfit_memdev
->memdev
->interleave_index
;
831 list_for_each_entry(nfit_idt
, &acpi_desc
->idts
, list
) {
832 if (nfit_idt
->idt
->interleave_index
!= idt_idx
)
834 nfit_mem
->idt_dcr
= nfit_idt
->idt
;
837 nfit_mem_init_bdw(acpi_desc
, nfit_mem
, spa
);
840 * A single dimm may belong to multiple SPA-PM
841 * ranges, record at least one in addition to
844 nfit_mem
->memdev_pmem
= nfit_memdev
->memdev
;
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);
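/*
 * 'revision' exposes the revision field cached from the ACPI NFIT table
 * header in acpi_desc->acpi_header.  Rough usage sketch (the exact sysfs
 * path depends on the bus device name and is shown only as an example):
 *
 *   # cat /sys/bus/nd/devices/ndbus0/nfit/revision
 *   1
 */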
static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}
/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);
/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);
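/*
 * Rough usage sketch for the scrub attribute (paths and values are
 * illustrative): reading it returns the number of completed ARS runs,
 * with a trailing '+' while a scrub is still in flight, and writing '1'
 * requests a new rescan via acpi_nfit_ars_rescan():
 *
 *   # cat /sys/bus/nd/devices/ndbus0/nfit/scrub
 *   2+
 *   # echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
 */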
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}
static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
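/*
 * nfit_visible() hides the 'scrub' attribute when the bus does not
 * advertise the full ND_CMD_ARS_CAP/START/STATUS command set (see
 * ars_supported() above); 'revision' and 'hw_error_scrub' remain visible
 * unconditionally.
 */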
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);
1067 static ssize_t
phys_id_show(struct device
*dev
,
1068 struct device_attribute
*attr
, char *buf
)
1070 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
1072 return sprintf(buf
, "%#x\n", memdev
->physical_id
);
1074 static DEVICE_ATTR_RO(phys_id
);
1076 static ssize_t
vendor_show(struct device
*dev
,
1077 struct device_attribute
*attr
, char *buf
)
1079 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1081 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->vendor_id
));
1083 static DEVICE_ATTR_RO(vendor
);
1085 static ssize_t
rev_id_show(struct device
*dev
,
1086 struct device_attribute
*attr
, char *buf
)
1088 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1090 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->revision_id
));
1092 static DEVICE_ATTR_RO(rev_id
);
1094 static ssize_t
device_show(struct device
*dev
,
1095 struct device_attribute
*attr
, char *buf
)
1097 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1099 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->device_id
));
1101 static DEVICE_ATTR_RO(device
);
1103 static ssize_t
subsystem_vendor_show(struct device
*dev
,
1104 struct device_attribute
*attr
, char *buf
)
1106 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1108 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_vendor_id
));
1110 static DEVICE_ATTR_RO(subsystem_vendor
);
1112 static ssize_t
subsystem_rev_id_show(struct device
*dev
,
1113 struct device_attribute
*attr
, char *buf
)
1115 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1117 return sprintf(buf
, "0x%04x\n",
1118 be16_to_cpu(dcr
->subsystem_revision_id
));
1120 static DEVICE_ATTR_RO(subsystem_rev_id
);
1122 static ssize_t
subsystem_device_show(struct device
*dev
,
1123 struct device_attribute
*attr
, char *buf
)
1125 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1127 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_device_id
));
1129 static DEVICE_ATTR_RO(subsystem_device
);
1131 static int num_nvdimm_formats(struct nvdimm
*nvdimm
)
1133 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1136 if (nfit_mem
->memdev_pmem
)
1138 if (nfit_mem
->memdev_bdw
)
1143 static ssize_t
format_show(struct device
*dev
,
1144 struct device_attribute
*attr
, char *buf
)
1146 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1148 return sprintf(buf
, "0x%04x\n", le16_to_cpu(dcr
->code
));
1150 static DEVICE_ATTR_RO(format
);
1152 static ssize_t
format1_show(struct device
*dev
,
1153 struct device_attribute
*attr
, char *buf
)
1156 ssize_t rc
= -ENXIO
;
1157 struct nfit_mem
*nfit_mem
;
1158 struct nfit_memdev
*nfit_memdev
;
1159 struct acpi_nfit_desc
*acpi_desc
;
1160 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1161 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1163 nfit_mem
= nvdimm_provider_data(nvdimm
);
1164 acpi_desc
= nfit_mem
->acpi_desc
;
1165 handle
= to_nfit_memdev(dev
)->device_handle
;
1167 /* assumes DIMMs have at most 2 published interface codes */
1168 mutex_lock(&acpi_desc
->init_mutex
);
1169 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1170 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
1171 struct nfit_dcr
*nfit_dcr
;
1173 if (memdev
->device_handle
!= handle
)
1176 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1177 if (nfit_dcr
->dcr
->region_index
!= memdev
->region_index
)
1179 if (nfit_dcr
->dcr
->code
== dcr
->code
)
1181 rc
= sprintf(buf
, "0x%04x\n",
1182 le16_to_cpu(nfit_dcr
->dcr
->code
));
1188 mutex_unlock(&acpi_desc
->init_mutex
);
1191 static DEVICE_ATTR_RO(format1
);
1193 static ssize_t
formats_show(struct device
*dev
,
1194 struct device_attribute
*attr
, char *buf
)
1196 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1198 return sprintf(buf
, "%d\n", num_nvdimm_formats(nvdimm
));
1200 static DEVICE_ATTR_RO(formats
);
1202 static ssize_t
serial_show(struct device
*dev
,
1203 struct device_attribute
*attr
, char *buf
)
1205 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1207 return sprintf(buf
, "0x%08x\n", be32_to_cpu(dcr
->serial_number
));
1209 static DEVICE_ATTR_RO(serial
);
1211 static ssize_t
family_show(struct device
*dev
,
1212 struct device_attribute
*attr
, char *buf
)
1214 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1215 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1217 if (nfit_mem
->family
< 0)
1219 return sprintf(buf
, "%d\n", nfit_mem
->family
);
1221 static DEVICE_ATTR_RO(family
);
1223 static ssize_t
dsm_mask_show(struct device
*dev
,
1224 struct device_attribute
*attr
, char *buf
)
1226 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1227 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1229 if (nfit_mem
->family
< 0)
1231 return sprintf(buf
, "%#lx\n", nfit_mem
->dsm_mask
);
1233 static DEVICE_ATTR_RO(dsm_mask
);
1235 static ssize_t
flags_show(struct device
*dev
,
1236 struct device_attribute
*attr
, char *buf
)
1238 u16 flags
= to_nfit_memdev(dev
)->flags
;
1240 return sprintf(buf
, "%s%s%s%s%s\n",
1241 flags
& ACPI_NFIT_MEM_SAVE_FAILED
? "save_fail " : "",
1242 flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? "restore_fail " : "",
1243 flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? "flush_fail " : "",
1244 flags
& ACPI_NFIT_MEM_NOT_ARMED
? "not_armed " : "",
1245 flags
& ACPI_NFIT_MEM_HEALTH_OBSERVED
? "smart_event " : "");
1247 static DEVICE_ATTR_RO(flags
);
1249 static ssize_t
id_show(struct device
*dev
,
1250 struct device_attribute
*attr
, char *buf
)
1252 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1254 if (dcr
->valid_fields
& ACPI_NFIT_CONTROL_MFG_INFO_VALID
)
1255 return sprintf(buf
, "%04x-%02x-%04x-%08x\n",
1256 be16_to_cpu(dcr
->vendor_id
),
1257 dcr
->manufacturing_location
,
1258 be16_to_cpu(dcr
->manufacturing_date
),
1259 be32_to_cpu(dcr
->serial_number
));
1261 return sprintf(buf
, "%04x-%08x\n",
1262 be16_to_cpu(dcr
->vendor_id
),
1263 be32_to_cpu(dcr
->serial_number
));
1265 static DEVICE_ATTR_RO(id
);
1267 static struct attribute
*acpi_nfit_dimm_attributes
[] = {
1268 &dev_attr_handle
.attr
,
1269 &dev_attr_phys_id
.attr
,
1270 &dev_attr_vendor
.attr
,
1271 &dev_attr_device
.attr
,
1272 &dev_attr_rev_id
.attr
,
1273 &dev_attr_subsystem_vendor
.attr
,
1274 &dev_attr_subsystem_device
.attr
,
1275 &dev_attr_subsystem_rev_id
.attr
,
1276 &dev_attr_format
.attr
,
1277 &dev_attr_formats
.attr
,
1278 &dev_attr_format1
.attr
,
1279 &dev_attr_serial
.attr
,
1280 &dev_attr_flags
.attr
,
1282 &dev_attr_family
.attr
,
1283 &dev_attr_dsm_mask
.attr
,
1287 static umode_t
acpi_nfit_dimm_attr_visible(struct kobject
*kobj
,
1288 struct attribute
*a
, int n
)
1290 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1291 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1293 if (!to_nfit_dcr(dev
))
1295 if (a
== &dev_attr_format1
.attr
&& num_nvdimm_formats(nvdimm
) <= 1)
1300 static struct attribute_group acpi_nfit_dimm_attribute_group
= {
1302 .attrs
= acpi_nfit_dimm_attributes
,
1303 .is_visible
= acpi_nfit_dimm_attr_visible
,
1306 static const struct attribute_group
*acpi_nfit_dimm_attribute_groups
[] = {
1307 &nvdimm_attribute_group
,
1308 &nd_device_attribute_group
,
1309 &acpi_nfit_dimm_attribute_group
,
1313 static struct nvdimm
*acpi_nfit_dimm_by_handle(struct acpi_nfit_desc
*acpi_desc
,
1316 struct nfit_mem
*nfit_mem
;
1318 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1319 if (__to_nfit_memdev(nfit_mem
)->device_handle
== device_handle
)
1320 return nfit_mem
->nvdimm
;
1325 void __acpi_nvdimm_notify(struct device
*dev
, u32 event
)
1327 struct nfit_mem
*nfit_mem
;
1328 struct acpi_nfit_desc
*acpi_desc
;
1330 dev_dbg(dev
->parent
, "%s: %s: event: %d\n", dev_name(dev
), __func__
,
1333 if (event
!= NFIT_NOTIFY_DIMM_HEALTH
) {
1334 dev_dbg(dev
->parent
, "%s: unknown event: %d\n", dev_name(dev
),
1339 acpi_desc
= dev_get_drvdata(dev
->parent
);
1344 * If we successfully retrieved acpi_desc, then we know nfit_mem data
1347 nfit_mem
= dev_get_drvdata(dev
);
1348 if (nfit_mem
&& nfit_mem
->flags_attr
)
1349 sysfs_notify_dirent(nfit_mem
->flags_attr
);
1351 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify
);
1353 static void acpi_nvdimm_notify(acpi_handle handle
, u32 event
, void *data
)
1355 struct acpi_device
*adev
= data
;
1356 struct device
*dev
= &adev
->dev
;
1358 device_lock(dev
->parent
);
1359 __acpi_nvdimm_notify(dev
, event
);
1360 device_unlock(dev
->parent
);
1363 static int acpi_nfit_add_dimm(struct acpi_nfit_desc
*acpi_desc
,
1364 struct nfit_mem
*nfit_mem
, u32 device_handle
)
1366 struct acpi_device
*adev
, *adev_dimm
;
1367 struct device
*dev
= acpi_desc
->dev
;
1368 unsigned long dsm_mask
;
1372 /* nfit test assumes 1:1 relationship between commands and dsms */
1373 nfit_mem
->dsm_mask
= acpi_desc
->dimm_cmd_force_en
;
1374 nfit_mem
->family
= NVDIMM_FAMILY_INTEL
;
1375 adev
= to_acpi_dev(acpi_desc
);
1379 adev_dimm
= acpi_find_child_device(adev
, device_handle
, false);
1380 nfit_mem
->adev
= adev_dimm
;
1382 dev_err(dev
, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1384 return force_enable_dimms
? 0 : -ENODEV
;
1387 if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm
->handle
,
1388 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
, adev_dimm
))) {
1389 dev_err(dev
, "%s: notification registration failed\n",
1390 dev_name(&adev_dimm
->dev
));
1395 * Until standardization materializes we need to consider 4
1396 * different command sets. Note, that checking for function0 (bit0)
1397 * tells us if any commands are reachable through this uuid.
1399 for (i
= NVDIMM_FAMILY_INTEL
; i
<= NVDIMM_FAMILY_MSFT
; i
++)
1400 if (acpi_check_dsm(adev_dimm
->handle
, to_nfit_uuid(i
), 1, 1))
1403 /* limit the supported commands to those that are publicly documented */
1404 nfit_mem
->family
= i
;
1405 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1407 if (disable_vendor_specific
)
1408 dsm_mask
&= ~(1 << ND_CMD_VENDOR
);
1409 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE1
) {
1410 dsm_mask
= 0x1c3c76;
1411 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE2
) {
1413 if (disable_vendor_specific
)
1414 dsm_mask
&= ~(1 << 8);
1415 } else if (nfit_mem
->family
== NVDIMM_FAMILY_MSFT
) {
1416 dsm_mask
= 0xffffffff;
1418 dev_dbg(dev
, "unknown dimm command family\n");
1419 nfit_mem
->family
= -1;
1420 /* DSMs are optional, continue loading the driver... */
1424 uuid
= to_nfit_uuid(nfit_mem
->family
);
1425 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1426 if (acpi_check_dsm(adev_dimm
->handle
, uuid
, 1, 1ULL << i
))
1427 set_bit(i
, &nfit_mem
->dsm_mask
);
1432 static void shutdown_dimm_notify(void *data
)
1434 struct acpi_nfit_desc
*acpi_desc
= data
;
1435 struct nfit_mem
*nfit_mem
;
1437 mutex_lock(&acpi_desc
->init_mutex
);
1439 * Clear out the nfit_mem->flags_attr and shut down dimm event
1442 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1443 struct acpi_device
*adev_dimm
= nfit_mem
->adev
;
1445 if (nfit_mem
->flags_attr
) {
1446 sysfs_put(nfit_mem
->flags_attr
);
1447 nfit_mem
->flags_attr
= NULL
;
1450 acpi_remove_notify_handler(adev_dimm
->handle
,
1451 ACPI_DEVICE_NOTIFY
, acpi_nvdimm_notify
);
1453 mutex_unlock(&acpi_desc
->init_mutex
);
1456 static int acpi_nfit_register_dimms(struct acpi_nfit_desc
*acpi_desc
)
1458 struct nfit_mem
*nfit_mem
;
1459 int dimm_count
= 0, rc
;
1460 struct nvdimm
*nvdimm
;
1462 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1463 struct acpi_nfit_flush_address
*flush
;
1464 unsigned long flags
= 0, cmd_mask
;
1468 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
1469 nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
, device_handle
);
1475 if (nfit_mem
->bdw
&& nfit_mem
->memdev_pmem
)
1476 flags
|= NDD_ALIASING
;
1478 mem_flags
= __to_nfit_memdev(nfit_mem
)->flags
;
1479 if (mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
)
1480 flags
|= NDD_UNARMED
;
1482 rc
= acpi_nfit_add_dimm(acpi_desc
, nfit_mem
, device_handle
);
1487 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1488 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1489 * userspace interface.
1491 cmd_mask
= 1UL << ND_CMD_CALL
;
1492 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
)
1493 cmd_mask
|= nfit_mem
->dsm_mask
;
1495 flush
= nfit_mem
->nfit_flush
? nfit_mem
->nfit_flush
->flush
1497 nvdimm
= nvdimm_create(acpi_desc
->nvdimm_bus
, nfit_mem
,
1498 acpi_nfit_dimm_attribute_groups
,
1499 flags
, cmd_mask
, flush
? flush
->hint_count
: 0,
1500 nfit_mem
->flush_wpq
);
1504 nfit_mem
->nvdimm
= nvdimm
;
1507 if ((mem_flags
& ACPI_NFIT_MEM_FAILED_MASK
) == 0)
1510 dev_info(acpi_desc
->dev
, "%s flags:%s%s%s%s\n",
1511 nvdimm_name(nvdimm
),
1512 mem_flags
& ACPI_NFIT_MEM_SAVE_FAILED
? " save_fail" : "",
1513 mem_flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? " restore_fail":"",
1514 mem_flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? " flush_fail" : "",
1515 mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
? " not_armed" : "");
1519 rc
= nvdimm_bus_check_dimm_count(acpi_desc
->nvdimm_bus
, dimm_count
);
1524 * Now that dimms are successfully registered, and async registration
1525 * is flushed, attempt to enable event notification.
1527 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1528 struct kernfs_node
*nfit_kernfs
;
1530 nvdimm
= nfit_mem
->nvdimm
;
1531 nfit_kernfs
= sysfs_get_dirent(nvdimm_kobj(nvdimm
)->sd
, "nfit");
1533 nfit_mem
->flags_attr
= sysfs_get_dirent(nfit_kernfs
,
1535 sysfs_put(nfit_kernfs
);
1536 if (!nfit_mem
->flags_attr
)
1537 dev_warn(acpi_desc
->dev
, "%s: notifications disabled\n",
1538 nvdimm_name(nvdimm
));
1541 return devm_add_action_or_reset(acpi_desc
->dev
, shutdown_dimm_notify
,
1545 static void acpi_nfit_init_dsms(struct acpi_nfit_desc
*acpi_desc
)
1547 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1548 const u8
*uuid
= to_nfit_uuid(NFIT_DEV_BUS
);
1549 struct acpi_device
*adev
;
1552 nd_desc
->cmd_mask
= acpi_desc
->bus_cmd_force_en
;
1553 adev
= to_acpi_dev(acpi_desc
);
1557 for (i
= ND_CMD_ARS_CAP
; i
<= ND_CMD_CLEAR_ERROR
; i
++)
1558 if (acpi_check_dsm(adev
->handle
, uuid
, 1, 1ULL << i
))
1559 set_bit(i
, &nd_desc
->cmd_mask
);
1562 static ssize_t
range_index_show(struct device
*dev
,
1563 struct device_attribute
*attr
, char *buf
)
1565 struct nd_region
*nd_region
= to_nd_region(dev
);
1566 struct nfit_spa
*nfit_spa
= nd_region_provider_data(nd_region
);
1568 return sprintf(buf
, "%d\n", nfit_spa
->spa
->range_index
);
1570 static DEVICE_ATTR_RO(range_index
);
1572 static struct attribute
*acpi_nfit_region_attributes
[] = {
1573 &dev_attr_range_index
.attr
,
1577 static struct attribute_group acpi_nfit_region_attribute_group
= {
1579 .attrs
= acpi_nfit_region_attributes
,
1582 static const struct attribute_group
*acpi_nfit_region_attribute_groups
[] = {
1583 &nd_region_attribute_group
,
1584 &nd_mapping_attribute_group
,
1585 &nd_device_attribute_group
,
1586 &nd_numa_attribute_group
,
1587 &acpi_nfit_region_attribute_group
,
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}
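/*
 * These per-mapping records are what acpi_nfit_init_interleave_set()
 * sorts by region_offset (via cmp_map()) and then feeds through
 * nd_fletcher64() to derive the interleave-set cookie, so the cookie
 * does not depend on the order in which the memdevs happen to be listed.
 */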
1615 /* Retrieve the nth entry referencing this spa */
1616 static struct acpi_nfit_memory_map
*memdev_from_spa(
1617 struct acpi_nfit_desc
*acpi_desc
, u16 range_index
, int n
)
1619 struct nfit_memdev
*nfit_memdev
;
1621 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
)
1622 if (nfit_memdev
->memdev
->range_index
== range_index
)
1624 return nfit_memdev
->memdev
;
1628 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc
*acpi_desc
,
1629 struct nd_region_desc
*ndr_desc
,
1630 struct acpi_nfit_system_address
*spa
)
1632 int i
, spa_type
= nfit_spa_type(spa
);
1633 struct device
*dev
= acpi_desc
->dev
;
1634 struct nd_interleave_set
*nd_set
;
1635 u16 nr
= ndr_desc
->num_mappings
;
1636 struct nfit_set_info
*info
;
1638 if (spa_type
== NFIT_SPA_PM
|| spa_type
== NFIT_SPA_VOLATILE
)
1643 nd_set
= devm_kzalloc(dev
, sizeof(*nd_set
), GFP_KERNEL
);
1647 info
= devm_kzalloc(dev
, sizeof_nfit_set_info(nr
), GFP_KERNEL
);
1650 for (i
= 0; i
< nr
; i
++) {
1651 struct nd_mapping_desc
*mapping
= &ndr_desc
->mapping
[i
];
1652 struct nfit_set_info_map
*map
= &info
->mapping
[i
];
1653 struct nvdimm
*nvdimm
= mapping
->nvdimm
;
1654 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1655 struct acpi_nfit_memory_map
*memdev
= memdev_from_spa(acpi_desc
,
1656 spa
->range_index
, i
);
1658 if (!memdev
|| !nfit_mem
->dcr
) {
1659 dev_err(dev
, "%s: failed to find DCR\n", __func__
);
1663 map
->region_offset
= memdev
->region_offset
;
1664 map
->serial_number
= nfit_mem
->dcr
->serial_number
;
1667 sort(&info
->mapping
[0], nr
, sizeof(struct nfit_set_info_map
),
1669 nd_set
->cookie
= nd_fletcher64(info
, sizeof_nfit_set_info(nr
), 0);
1670 ndr_desc
->nd_set
= nd_set
;
1671 devm_kfree(dev
, info
);
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
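/*
 * Illustrative walk-through with made-up numbers: for line_size = 256,
 * num_lines = 2, table_size = 4096 and line_offset[] = { 0, 4 }, an
 * aperture offset of 0x305 yields line_no = 3, sub_line_offset = 5,
 * table_skip_count = 1 and line_index = 1, so the translated offset is
 * base_offset + 4 * 256 + 1 * 4096 + 5.
 */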
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
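/*
 * The command word written above packs the block-window request as
 * follows: bits 0..47 carry the DPA in cache-line units, bits 48..55 the
 * transfer length in cache-line units, and bit 56 the write flag.  The
 * readq() performed when NFIT_BLK_DCR_LATCH is set appears to exist only
 * to latch/flush the control register write before the aperture is used.
 */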
1732 static int acpi_nfit_blk_single_io(struct nfit_blk
*nfit_blk
,
1733 resource_size_t dpa
, void *iobuf
, size_t len
, int rw
,
1736 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[BDW
];
1737 unsigned int copied
= 0;
1741 base_offset
= nfit_blk
->bdw_offset
+ dpa
% L1_CACHE_BYTES
1742 + lane
* mmio
->size
;
1743 write_blk_ctl(nfit_blk
, lane
, dpa
, len
, rw
);
1748 if (mmio
->num_lines
) {
1751 offset
= to_interleave_offset(base_offset
+ copied
,
1753 div_u64_rem(offset
, mmio
->line_size
, &line_offset
);
1754 c
= min_t(size_t, len
, mmio
->line_size
- line_offset
);
1756 offset
= base_offset
+ nfit_blk
->bdw_offset
;
1761 memcpy_to_pmem(mmio
->addr
.aperture
+ offset
,
1764 if (nfit_blk
->dimm_flags
& NFIT_BLK_READ_FLUSH
)
1765 mmio_flush_range((void __force
*)
1766 mmio
->addr
.aperture
+ offset
, c
);
1768 memcpy_from_pmem(iobuf
+ copied
,
1769 mmio
->addr
.aperture
+ offset
, c
);
1777 nvdimm_flush(nfit_blk
->nd_region
);
1779 rc
= read_blk_stat(nfit_blk
, lane
) ? -EIO
: 0;
1783 static int acpi_nfit_blk_region_do_io(struct nd_blk_region
*ndbr
,
1784 resource_size_t dpa
, void *iobuf
, u64 len
, int rw
)
1786 struct nfit_blk
*nfit_blk
= nd_blk_region_provider_data(ndbr
);
1787 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[BDW
];
1788 struct nd_region
*nd_region
= nfit_blk
->nd_region
;
1789 unsigned int lane
, copied
= 0;
1792 lane
= nd_region_acquire_lane(nd_region
);
1794 u64 c
= min(len
, mmio
->size
);
1796 rc
= acpi_nfit_blk_single_io(nfit_blk
, dpa
+ copied
,
1797 iobuf
+ copied
, c
, rw
, lane
);
1804 nd_region_release_lane(nd_region
, lane
);
1809 static int nfit_blk_init_interleave(struct nfit_blk_mmio
*mmio
,
1810 struct acpi_nfit_interleave
*idt
, u16 interleave_ways
)
1813 mmio
->num_lines
= idt
->line_count
;
1814 mmio
->line_size
= idt
->line_size
;
1815 if (interleave_ways
== 0)
1817 mmio
->table_size
= mmio
->num_lines
* interleave_ways
1824 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor
*nd_desc
,
1825 struct nvdimm
*nvdimm
, struct nfit_blk
*nfit_blk
)
1827 struct nd_cmd_dimm_flags flags
;
1830 memset(&flags
, 0, sizeof(flags
));
1831 rc
= nd_desc
->ndctl(nd_desc
, nvdimm
, ND_CMD_DIMM_FLAGS
, &flags
,
1832 sizeof(flags
), NULL
);
1834 if (rc
>= 0 && flags
.status
== 0)
1835 nfit_blk
->dimm_flags
= flags
.flags
;
1836 else if (rc
== -ENOTTY
) {
1837 /* fall back to a conservative default */
1838 nfit_blk
->dimm_flags
= NFIT_BLK_DCR_LATCH
| NFIT_BLK_READ_FLUSH
;
1846 static int acpi_nfit_blk_region_enable(struct nvdimm_bus
*nvdimm_bus
,
1849 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1850 struct nd_blk_region
*ndbr
= to_nd_blk_region(dev
);
1851 struct nfit_blk_mmio
*mmio
;
1852 struct nfit_blk
*nfit_blk
;
1853 struct nfit_mem
*nfit_mem
;
1854 struct nvdimm
*nvdimm
;
1857 nvdimm
= nd_blk_region_to_dimm(ndbr
);
1858 nfit_mem
= nvdimm_provider_data(nvdimm
);
1859 if (!nfit_mem
|| !nfit_mem
->dcr
|| !nfit_mem
->bdw
) {
1860 dev_dbg(dev
, "%s: missing%s%s%s\n", __func__
,
1861 nfit_mem
? "" : " nfit_mem",
1862 (nfit_mem
&& nfit_mem
->dcr
) ? "" : " dcr",
1863 (nfit_mem
&& nfit_mem
->bdw
) ? "" : " bdw");
1867 nfit_blk
= devm_kzalloc(dev
, sizeof(*nfit_blk
), GFP_KERNEL
);
1870 nd_blk_region_set_provider_data(ndbr
, nfit_blk
);
1871 nfit_blk
->nd_region
= to_nd_region(dev
);
1873 /* map block aperture memory */
1874 nfit_blk
->bdw_offset
= nfit_mem
->bdw
->offset
;
1875 mmio
= &nfit_blk
->mmio
[BDW
];
1876 mmio
->addr
.base
= devm_nvdimm_memremap(dev
, nfit_mem
->spa_bdw
->address
,
1877 nfit_mem
->spa_bdw
->length
, ARCH_MEMREMAP_PMEM
);
1878 if (!mmio
->addr
.base
) {
1879 dev_dbg(dev
, "%s: %s failed to map bdw\n", __func__
,
1880 nvdimm_name(nvdimm
));
1883 mmio
->size
= nfit_mem
->bdw
->size
;
1884 mmio
->base_offset
= nfit_mem
->memdev_bdw
->region_offset
;
1885 mmio
->idt
= nfit_mem
->idt_bdw
;
1886 mmio
->spa
= nfit_mem
->spa_bdw
;
1887 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_bdw
,
1888 nfit_mem
->memdev_bdw
->interleave_ways
);
1890 dev_dbg(dev
, "%s: %s failed to init bdw interleave\n",
1891 __func__
, nvdimm_name(nvdimm
));
1895 /* map block control memory */
1896 nfit_blk
->cmd_offset
= nfit_mem
->dcr
->command_offset
;
1897 nfit_blk
->stat_offset
= nfit_mem
->dcr
->status_offset
;
1898 mmio
= &nfit_blk
->mmio
[DCR
];
1899 mmio
->addr
.base
= devm_nvdimm_ioremap(dev
, nfit_mem
->spa_dcr
->address
,
1900 nfit_mem
->spa_dcr
->length
);
1901 if (!mmio
->addr
.base
) {
1902 dev_dbg(dev
, "%s: %s failed to map dcr\n", __func__
,
1903 nvdimm_name(nvdimm
));
1906 mmio
->size
= nfit_mem
->dcr
->window_size
;
1907 mmio
->base_offset
= nfit_mem
->memdev_dcr
->region_offset
;
1908 mmio
->idt
= nfit_mem
->idt_dcr
;
1909 mmio
->spa
= nfit_mem
->spa_dcr
;
1910 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_dcr
,
1911 nfit_mem
->memdev_dcr
->interleave_ways
);
1913 dev_dbg(dev
, "%s: %s failed to init dcr interleave\n",
1914 __func__
, nvdimm_name(nvdimm
));
1918 rc
= acpi_nfit_blk_get_flags(nd_desc
, nvdimm
, nfit_blk
);
1920 dev_dbg(dev
, "%s: %s failed get DIMM flags\n",
1921 __func__
, nvdimm_name(nvdimm
));
1925 if (nvdimm_has_flush(nfit_blk
->nd_region
) < 0)
1926 dev_warn(dev
, "unable to guarantee persistence of writes\n");
1928 if (mmio
->line_size
== 0)
1931 if ((u32
) nfit_blk
->cmd_offset
% mmio
->line_size
1932 + 8 > mmio
->line_size
) {
1933 dev_dbg(dev
, "cmd_offset crosses interleave boundary\n");
1935 } else if ((u32
) nfit_blk
->stat_offset
% mmio
->line_size
1936 + 8 > mmio
->line_size
) {
1937 dev_dbg(dev
, "stat_offset crosses interleave boundary\n");
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
2017 static int ars_status_process_records(struct acpi_nfit_desc
*acpi_desc
,
2018 struct nd_cmd_ars_status
*ars_status
)
2020 struct nvdimm_bus
*nvdimm_bus
= acpi_desc
->nvdimm_bus
;
2025 * First record starts at 44 byte offset from the start of the
2028 if (ars_status
->out_length
< 44)
2030 for (i
= 0; i
< ars_status
->num_records
; i
++) {
2031 /* only process full records */
2032 if (ars_status
->out_length
2033 < 44 + sizeof(struct nd_ars_record
) * (i
+ 1))
2035 rc
= nvdimm_bus_add_poison(nvdimm_bus
,
2036 ars_status
->records
[i
].err_address
,
2037 ars_status
->records
[i
].length
);
2041 if (i
< ars_status
->num_records
)
2042 dev_warn(acpi_desc
->dev
, "detected truncated ars results\n");
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}
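/*
 * This is the devm action paired with the insert_resource() call in
 * acpi_nfit_insert_resource() below, so a "Persistent Memory" resource
 * inserted by the driver is removed again when the ACPI device goes
 * away.
 */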
2054 static int acpi_nfit_insert_resource(struct acpi_nfit_desc
*acpi_desc
,
2055 struct nd_region_desc
*ndr_desc
)
2057 struct resource
*res
, *nd_res
= ndr_desc
->res
;
2060 /* No operation if the region is already registered as PMEM */
2061 is_pmem
= region_intersects(nd_res
->start
, resource_size(nd_res
),
2062 IORESOURCE_MEM
, IORES_DESC_PERSISTENT_MEMORY
);
2063 if (is_pmem
== REGION_INTERSECTS
)
2066 res
= devm_kzalloc(acpi_desc
->dev
, sizeof(*res
), GFP_KERNEL
);
2070 res
->name
= "Persistent Memory";
2071 res
->start
= nd_res
->start
;
2072 res
->end
= nd_res
->end
;
2073 res
->flags
= IORESOURCE_MEM
;
2074 res
->desc
= IORES_DESC_PERSISTENT_MEMORY
;
2076 ret
= insert_resource(&iomem_resource
, res
);
2080 ret
= devm_add_action_or_reset(acpi_desc
->dev
,
2081 acpi_nfit_remove_resource
,
2089 static int acpi_nfit_init_mapping(struct acpi_nfit_desc
*acpi_desc
,
2090 struct nd_mapping_desc
*mapping
, struct nd_region_desc
*ndr_desc
,
2091 struct acpi_nfit_memory_map
*memdev
,
2092 struct nfit_spa
*nfit_spa
)
2094 struct nvdimm
*nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
,
2095 memdev
->device_handle
);
2096 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
2097 struct nd_blk_region_desc
*ndbr_desc
;
2098 struct nfit_mem
*nfit_mem
;
2102 dev_err(acpi_desc
->dev
, "spa%d dimm: %#x not found\n",
2103 spa
->range_index
, memdev
->device_handle
);
2107 mapping
->nvdimm
= nvdimm
;
2108 switch (nfit_spa_type(spa
)) {
2110 case NFIT_SPA_VOLATILE
:
2111 mapping
->start
= memdev
->address
;
2112 mapping
->size
= memdev
->region_size
;
2115 nfit_mem
= nvdimm_provider_data(nvdimm
);
2116 if (!nfit_mem
|| !nfit_mem
->bdw
) {
2117 dev_dbg(acpi_desc
->dev
, "spa%d %s missing bdw\n",
2118 spa
->range_index
, nvdimm_name(nvdimm
));
2120 mapping
->size
= nfit_mem
->bdw
->capacity
;
2121 mapping
->start
= nfit_mem
->bdw
->start_address
;
2122 ndr_desc
->num_lanes
= nfit_mem
->bdw
->windows
;
2126 ndr_desc
->mapping
= mapping
;
2127 ndr_desc
->num_mappings
= blk_valid
;
2128 ndbr_desc
= to_blk_region_desc(ndr_desc
);
2129 ndbr_desc
->enable
= acpi_nfit_blk_region_enable
;
2130 ndbr_desc
->do_io
= acpi_desc
->blk_do_io
;
2131 nfit_spa
->nd_region
= nvdimm_blk_region_create(acpi_desc
->nvdimm_bus
,
2133 if (!nfit_spa
->nd_region
)
static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
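/*
 * Rough sketch of the ARS command flow this driver follows (the helpers
 * named below are the wrappers used above, each issuing the corresponding
 * ND_CMD_ARS_* command to the bus):
 *
 *	ars_get_cap()    -> ND_CMD_ARS_CAP     (max output size, scrub types)
 *	ars_start()      -> ND_CMD_ARS_START   (kick off a scrub of one SPA)
 *	ars_get_status() -> ND_CMD_ARS_STATUS  (poll / collect poison records)
 *	ars_continue()   -> ND_CMD_ARS_START   (resume after an output overflow)
 *
 * acpi_nfit_query_poison() only covers the cap/status half of the flow;
 * starting and continuing scrubs is driven by the scrub worker below.
 */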
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.  If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			init_ars_done = true;
			continue;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_destruct(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown.
	 */
	mutex_lock(&acpi_desc_lock);
	acpi_desc->cancel = 1;
	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
	if (acpi_desc->scrub_count_state)
		sysfs_put(acpi_desc->scrub_count_state);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	acpi_desc->nvdimm_bus = NULL;
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};
static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	/*
	 * Use init_completion() here: COMPLETION_INITIALIZER_ONSTACK() is an
	 * initializer expression, so invoking it as a bare statement would
	 * leave flush.cmp uninitialized.
	 */
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	if (acpi_desc->cancel)
		return 0;

	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
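/*
 * Sketch of a typical trigger for the rescan above: the nvdimm bus "scrub"
 * sysfs attribute's store handler in this driver ends up calling
 * acpi_nfit_ars_rescan(), roughly:
 *
 *	rc = acpi_nfit_ars_rescan(acpi_desc);
 *	if (rc)
 *		return rc;
 *	return size;
 *
 * so "echo 1 > /sys/.../nfit/scrub" re-runs ARS across all PM ranges.  The
 * snippet is illustrative only; see the attribute definitions for the exact
 * handler.
 */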
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
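/*
 * acpi_nfit_desc_init() and acpi_nfit_init() are exported so that callers
 * other than acpi_nfit_add() (for example a test harness supplying a
 * synthetic NFIT image) can bring up an nvdimm bus.  A minimal usage sketch,
 * assuming the caller owns 'dev' and a valid NFIT buffer 'nfit_buf' of
 * 'nfit_size' bytes (both names are placeholders):
 *
 *	acpi_nfit_desc_init(acpi_desc, dev);
 *	rc = acpi_nfit_init(acpi_desc, nfit_buf, nfit_size);
 *
 * acpi_nfit_init() registers the bus on its first invocation and merges
 * updated tables on subsequent invocations.
 */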
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	return rc;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_destruct */
	return 0;
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	if (event != NFIT_NOTIFY_UPDATE)
		return;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");