/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <asm/cacheflush.h>

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
static int xlat_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;

		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)

	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)

	case ND_CMD_ARS_STATUS:
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)

	case ND_CMD_CLEAR_ERROR:
		if (!clear_err->cleared)
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;

	/* all other non-zero status results in an error */
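
/*
 * The 'status' word handed to xlat_status() is the first u32 of the _DSM
 * output payload (saved as fw_status in acpi_nfit_ctl() below): the low
 * 16 bits carry the command status and the upper 16 bits carry
 * command-specific extended status, such as the supported scrub types
 * checked with 'status >> 16' above.
 */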
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;

	if (cmd == ND_CMD_CALL) {
		func = call_pkg->nd_command;

		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (call_pkg && nfit_mem->family != call_pkg->nd_family)

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;

		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,

		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,

		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);

		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			*cmd_rc = xlat_status(buf, cmd, fw_status);
		dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
				__func__, dimm_name, cmd_name, buf_len,

	*cmd_rc = xlat_status(buf, cmd, fw_status);
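
/*
 * For ND_CMD_CALL the input envelope is a struct nd_cmd_pkg: in_buf is
 * pointed at call_pkg->nd_payload for nd_size_in bytes, and on return the
 * raw _DSM output is copied back into the payload just after the input
 * area, with nd_fw_size reporting how much data firmware produced (the
 * copy is clamped to nd_size_out).  For the fixed-format commands the
 * output is instead unpacked one nd_cmd_desc field at a time into 'buf'
 * at in_buf.buffer.length + offset.
 */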
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),

	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa_type_name(nfit_spa_type(spa)));
static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),

	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,

	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),

	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),

	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))

	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),

	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))

	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}
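
/*
 * The interleave and flush-hint tables are variable length: the ACPI
 * structs declare a single-element array, so the full size is the base
 * struct plus (count - 1) extra entries.  A header that is too short
 * makes sizeof_idt()/sizeof_flush() report a zero size, which the add_*
 * helpers below treat as an invalid table.
 */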
static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);

	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",

	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);

	return table + hdr->length;
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
			if (nfit_memdev->memdev->device_handle != device_handle)
			if (nfit_memdev->memdev->region_index != dcr)

			nfit_mem->spa_bdw = nfit_spa->spa;

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
		nfit_mem->bdw = nfit_bdw->bdw;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
			nfit_mem->idt_bdw = nfit_idt->idt;
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)

		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle

			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);

			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)

			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;

			if (nfit_flush->flush->device_handle != device_handle)
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
				nfit_mem->idt_dcr = nfit_idt->idt;
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);

			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
	else if (handleA > handleB)

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
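
/*
 * nfit_mem_cmp() orders the list by NFIT device handle, so DIMM devices
 * are registered from this list in a stable, handle-sorted order.
 */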
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;

	rc = kstrtol(buf, 0, &val);

	nd_desc = dev_get_drvdata(dev);
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;

static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;

	nd_desc = dev_get_drvdata(dev);
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;

	rc = kstrtol(buf, 0, &val);

	nd_desc = dev_get_drvdata(dev);
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);

static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
};
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->memdev_pmem)
	if (nfit_mem->memdev_bdw)

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
			if (nfit_dcr->dcr->code == dcr->code)
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));

	mutex_unlock(&acpi_desc->init_mutex);

static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));

	return sprintf(buf, "%04x-%08x\n",
			be16_to_cpu(dcr->vendor_id),
			be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
};
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),

	acpi_desc = dev_get_drvdata(dev->parent);

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
		return force_enable_dimms ? 0 : -ENODEV;

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;

			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);

	mutex_unlock(&acpi_desc->init_mutex);
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);

		nfit_mem->nvdimm = nvdimm;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
				return nfit_memdev->memdev;

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
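
/*
 * The interleave set cookie is derived from the (region_offset,
 * serial_number) pairs gathered above: the mappings are sorted by
 * region_offset (cmp_map()) and the resulting nfit_set_info is hashed
 * with nd_fletcher64(), so any change in the DIMM population or ordering
 * of an interleave set yields a different cookie.
 */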
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
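
/*
 * Worked example for the translation above: with line_size = 256 and
 * num_lines = 2, a request offset of 0x500 gives line_no = 5 and
 * sub_line_offset = 0; line_index = 5 % 2 = 1 selects the second entry
 * of idt->line_offset[], and table_skip_count = 5 / 2 = 2 whole
 * interleave tables are skipped (table_offset = 2 * table_size).  The
 * returned address is base_offset plus those three components.
 */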
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_MASK = (1ULL << 8) - 1,

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
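
/*
 * The block control word written above packs the whole transfer into one
 * 64-bit register: the DPA in cache-line units (dpa >> L1_CACHE_SHIFT)
 * occupies the low bits under BCW_OFFSET_MASK, the length in cache lines
 * sits above it at BCW_LEN_SHIFT, and the read/write flag is placed at
 * BCW_CMD_SHIFT.  The readq() of the register afterwards is only issued
 * when the DIMM reports the NFIT_BLK_DCR_LATCH flag.
 */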
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
			+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);

		if (mmio->num_lines) {
			offset = to_interleave_offset(base_offset + copied,
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
			offset = base_offset + nfit_blk->bdw_offset;

			memcpy_to_pmem(mmio->addr.aperture + offset,
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);
			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);

	nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;

	lane = nd_region_acquire_lane(nd_region);
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);

	nd_region_release_lane(nd_region, lane);
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	mmio->num_lines = idt->line_count;
	mmio->line_size = idt->line_size;
	if (interleave_ways == 0)
	mmio->table_size = mmio->num_lines * interleave_ways

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
*nvdimm_bus
,
1836 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1837 struct nd_blk_region
*ndbr
= to_nd_blk_region(dev
);
1838 struct nfit_blk_mmio
*mmio
;
1839 struct nfit_blk
*nfit_blk
;
1840 struct nfit_mem
*nfit_mem
;
1841 struct nvdimm
*nvdimm
;
1844 nvdimm
= nd_blk_region_to_dimm(ndbr
);
1845 nfit_mem
= nvdimm_provider_data(nvdimm
);
1846 if (!nfit_mem
|| !nfit_mem
->dcr
|| !nfit_mem
->bdw
) {
1847 dev_dbg(dev
, "%s: missing%s%s%s\n", __func__
,
1848 nfit_mem
? "" : " nfit_mem",
1849 (nfit_mem
&& nfit_mem
->dcr
) ? "" : " dcr",
1850 (nfit_mem
&& nfit_mem
->bdw
) ? "" : " bdw");
1854 nfit_blk
= devm_kzalloc(dev
, sizeof(*nfit_blk
), GFP_KERNEL
);
1857 nd_blk_region_set_provider_data(ndbr
, nfit_blk
);
1858 nfit_blk
->nd_region
= to_nd_region(dev
);
1860 /* map block aperture memory */
1861 nfit_blk
->bdw_offset
= nfit_mem
->bdw
->offset
;
1862 mmio
= &nfit_blk
->mmio
[BDW
];
1863 mmio
->addr
.base
= devm_nvdimm_memremap(dev
, nfit_mem
->spa_bdw
->address
,
1864 nfit_mem
->spa_bdw
->length
, ARCH_MEMREMAP_PMEM
);
1865 if (!mmio
->addr
.base
) {
1866 dev_dbg(dev
, "%s: %s failed to map bdw\n", __func__
,
1867 nvdimm_name(nvdimm
));
1870 mmio
->size
= nfit_mem
->bdw
->size
;
1871 mmio
->base_offset
= nfit_mem
->memdev_bdw
->region_offset
;
1872 mmio
->idt
= nfit_mem
->idt_bdw
;
1873 mmio
->spa
= nfit_mem
->spa_bdw
;
1874 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_bdw
,
1875 nfit_mem
->memdev_bdw
->interleave_ways
);
1877 dev_dbg(dev
, "%s: %s failed to init bdw interleave\n",
1878 __func__
, nvdimm_name(nvdimm
));
1882 /* map block control memory */
1883 nfit_blk
->cmd_offset
= nfit_mem
->dcr
->command_offset
;
1884 nfit_blk
->stat_offset
= nfit_mem
->dcr
->status_offset
;
1885 mmio
= &nfit_blk
->mmio
[DCR
];
1886 mmio
->addr
.base
= devm_nvdimm_ioremap(dev
, nfit_mem
->spa_dcr
->address
,
1887 nfit_mem
->spa_dcr
->length
);
1888 if (!mmio
->addr
.base
) {
1889 dev_dbg(dev
, "%s: %s failed to map dcr\n", __func__
,
1890 nvdimm_name(nvdimm
));
1893 mmio
->size
= nfit_mem
->dcr
->window_size
;
1894 mmio
->base_offset
= nfit_mem
->memdev_dcr
->region_offset
;
1895 mmio
->idt
= nfit_mem
->idt_dcr
;
1896 mmio
->spa
= nfit_mem
->spa_dcr
;
1897 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_dcr
,
1898 nfit_mem
->memdev_dcr
->interleave_ways
);
1900 dev_dbg(dev
, "%s: %s failed to init dcr interleave\n",
1901 __func__
, nvdimm_name(nvdimm
));
1905 rc
= acpi_nfit_blk_get_flags(nd_desc
, nvdimm
, nfit_blk
);
1907 dev_dbg(dev
, "%s: %s failed get DIMM flags\n",
1908 __func__
, nvdimm_name(nvdimm
));
1912 if (nvdimm_has_flush(nfit_blk
->nd_region
) < 0)
1913 dev_warn(dev
, "unable to guarantee persistence of writes\n");
1915 if (mmio
->line_size
== 0)
1918 if ((u32
) nfit_blk
->cmd_offset
% mmio
->line_size
1919 + 8 > mmio
->line_size
) {
1920 dev_dbg(dev
, "cmd_offset crosses interleave boundary\n");
1922 } else if ((u32
) nfit_blk
->stat_offset
% mmio
->line_size
1923 + 8 > mmio
->line_size
) {
1924 dev_dbg(dev
, "stat_offset crosses interleave boundary\n");
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);

static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
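
/*
 * ARS flow as used below: ars_get_cap() reports the scrub capabilities
 * and maximum result size for a range, ars_start() kicks off a scrub
 * (persistent or volatile depending on the SPA type), ars_get_status()
 * fetches the accumulated error records, and ars_continue() resumes the
 * scrub from the reported restart address when the result buffer
 * overflowed.
 */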
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status)
{
	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
			IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);

	ret = devm_add_action_or_reset(acpi_desc->dev,
			acpi_nfit_remove_resource,
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;

		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;

		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;

		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
		if (!nfit_spa->nd_region)

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
			nfit_spa_type(spa) == NFIT_SPA_VCD ||
			nfit_spa_type(spa) == NFIT_SPA_PDISK ||
			nfit_spa_type(spa) == NFIT_SPA_PCD);
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
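/*
 * (Re)allocate the buffer used to retrieve ARS (Address Range Scrub)
 * results: reuse the existing allocation when it is already at least
 * max_ars bytes, otherwise replace it with a zeroed buffer of that size.
 */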
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;

	return 0;
}
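/*
 * On first use cache the ARS capabilities for this SPA range, then fetch
 * the current scrub status and feed any reported poison records to
 * libnvdimm via ars_status_process_records().
 */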
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return rc;
}
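/*
 * Run a directed scrub for a range flagged 'ars_required' and poll for
 * completion, dropping init_mutex around the sleep.  An overflowing
 * result buffer is retried with ars_continue() up to scrub_overflow_abort
 * times before the range is reported as-is.
 */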
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
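/* Workqueue callback behind acpi_desc->work, see acpi_nfit_desc_init() */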
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1. If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}
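/*
 * Register BLK (DCR) regions immediately since they do not depend on ARS
 * results, then queue the scrub work to take care of the remaining ranges.
 */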
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}

	return 0;
}
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_destruct(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race with the removal of this descriptor from the acpi_descs list.
	 */
	mutex_lock(&acpi_desc_lock);
	acpi_desc->cancel = 1;
	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
	if (acpi_desc->scrub_count_state)
		sysfs_put(acpi_desc->scrub_count_state);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	acpi_desc->nvdimm_bus = NULL;
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);
}
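/*
 * Parse an NFIT (or _FIT) image: on the first call register the nvdimm
 * bus, teardown action, and scrub attribute, then diff the new tables
 * against the previously seen set and (re)register dimms and regions.
 */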
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
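/*
 * 'flush_probe' support: a trivial work item is queued behind any
 * in-flight scrub work on nfit_wq and waited on, so callers observe all
 * prior region registrations before probing child devices.
 */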
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};
static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
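/*
 * Request a rescan of all persistent-memory ranges; the scrub itself runs
 * from the nfit workqueue and is skipped while a scrub is already busy or
 * the descriptor is being torn down.
 */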
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	if (acpi_desc->cancel)
		return 0;

	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
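/*
 * Initialize the common fields of an acpi_nfit_desc: nvdimm bus descriptor
 * callbacks, table lists, init_mutex, and the scrub work item.
 */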
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	return rc;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_destruct */
	return 0;
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	if (event != NFIT_NOTIFY_UPDATE)
		return;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
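/*
 * Module init: sanity check ACPICA structure sizes against the NFIT
 * specification, populate the UUID lookup table, create the nfit
 * workqueue, and register the ACPI driver.
 */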
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}
module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");