/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");
static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];
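/* NFIT-related UUIDs, indexed by enum nfit_uuids and returned by to_nfit_uuid() */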
const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
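
/*
 * Translate the command-specific firmware status embedded in 'buf'
 * (ARS cap/start/status and clear-error payloads) into an errno that
 * libnvdimm callers understand.
 */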
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		/* Command failed */
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}
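
/*
 * acpi_nfit_ctl() is the command entry point for the bus and its DIMMs:
 * it wraps the libnvdimm payload in an ACPI package, evaluates the _DSM
 * against either the DIMM's ACPI device or the root NFIT device, and
 * copies the output buffer back for the caller.
 */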
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}
int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
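
/*
 * Each add_*() helper below first checks the list of tables from the
 * previous enumeration ('prev') and moves an identical table over, so
 * that entries which did not change across an NFIT update are reused
 * instead of reallocated.
 */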
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}
static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}
/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}
static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}
static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
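
/*
 * add_table(): validate one NFIT sub-table, hand it to the matching
 * add_*() helper, and return a pointer just past it so the caller can
 * walk the table list header by header.
 */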
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
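
/*
 * Find the SPA-BDW range backing this DIMM's block windows by matching
 * a BDW-type SPA against a memdev with the same device handle and
 * control-region index.
 */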
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}
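
/*
 * For every memdev that references this SPA range, look up (or
 * allocate) the nfit_mem tracking object for the DIMM and attach the
 * matching control region, flush hints, interleave tables and, for
 * SPA-DCR ranges, the block-data-window resources.
 */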
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}
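
/* Comparison helper used by list_sort() to order DIMMs by device handle */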
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
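
/* Bus-level sysfs attributes: NFIT revision and ARS scrub control */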
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);
/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
886 static ssize_t
scrub_show(struct device
*dev
,
887 struct device_attribute
*attr
, char *buf
)
889 struct nvdimm_bus_descriptor
*nd_desc
;
893 nd_desc
= dev_get_drvdata(dev
);
895 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
897 rc
= sprintf(buf
, "%d%s", acpi_desc
->scrub_count
,
898 (work_busy(&acpi_desc
->work
)) ? "+\n" : "\n");
904 static ssize_t
scrub_store(struct device
*dev
,
905 struct device_attribute
*attr
, const char *buf
, size_t size
)
907 struct nvdimm_bus_descriptor
*nd_desc
;
911 rc
= kstrtol(buf
, 0, &val
);
918 nd_desc
= dev_get_drvdata(dev
);
920 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
922 rc
= acpi_nfit_ars_rescan(acpi_desc
);
929 static DEVICE_ATTR_RW(scrub
);
931 static bool ars_supported(struct nvdimm_bus
*nvdimm_bus
)
933 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
934 const unsigned long mask
= 1 << ND_CMD_ARS_CAP
| 1 << ND_CMD_ARS_START
935 | 1 << ND_CMD_ARS_STATUS
;
937 return (nd_desc
->cmd_mask
& mask
) == mask
;
940 static umode_t
nfit_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
942 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
943 struct nvdimm_bus
*nvdimm_bus
= to_nvdimm_bus(dev
);
945 if (a
== &dev_attr_scrub
.attr
&& !ars_supported(nvdimm_bus
))
950 static struct attribute
*acpi_nfit_attributes
[] = {
951 &dev_attr_revision
.attr
,
952 &dev_attr_scrub
.attr
,
956 static struct attribute_group acpi_nfit_attribute_group
= {
958 .attrs
= acpi_nfit_attributes
,
959 .is_visible
= nfit_visible
,
962 static const struct attribute_group
*acpi_nfit_attribute_groups
[] = {
963 &nvdimm_bus_attribute_group
,
964 &acpi_nfit_attribute_group
,
968 static struct acpi_nfit_memory_map
*to_nfit_memdev(struct device
*dev
)
970 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
971 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
973 return __to_nfit_memdev(nfit_mem
);
976 static struct acpi_nfit_control_region
*to_nfit_dcr(struct device
*dev
)
978 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
979 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
981 return nfit_mem
->dcr
;
984 static ssize_t
handle_show(struct device
*dev
,
985 struct device_attribute
*attr
, char *buf
)
987 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
989 return sprintf(buf
, "%#x\n", memdev
->device_handle
);
991 static DEVICE_ATTR_RO(handle
);
993 static ssize_t
phys_id_show(struct device
*dev
,
994 struct device_attribute
*attr
, char *buf
)
996 struct acpi_nfit_memory_map
*memdev
= to_nfit_memdev(dev
);
998 return sprintf(buf
, "%#x\n", memdev
->physical_id
);
1000 static DEVICE_ATTR_RO(phys_id
);
1002 static ssize_t
vendor_show(struct device
*dev
,
1003 struct device_attribute
*attr
, char *buf
)
1005 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1007 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->vendor_id
));
1009 static DEVICE_ATTR_RO(vendor
);
1011 static ssize_t
rev_id_show(struct device
*dev
,
1012 struct device_attribute
*attr
, char *buf
)
1014 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1016 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->revision_id
));
1018 static DEVICE_ATTR_RO(rev_id
);
1020 static ssize_t
device_show(struct device
*dev
,
1021 struct device_attribute
*attr
, char *buf
)
1023 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1025 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->device_id
));
1027 static DEVICE_ATTR_RO(device
);
1029 static ssize_t
subsystem_vendor_show(struct device
*dev
,
1030 struct device_attribute
*attr
, char *buf
)
1032 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1034 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_vendor_id
));
1036 static DEVICE_ATTR_RO(subsystem_vendor
);
1038 static ssize_t
subsystem_rev_id_show(struct device
*dev
,
1039 struct device_attribute
*attr
, char *buf
)
1041 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1043 return sprintf(buf
, "0x%04x\n",
1044 be16_to_cpu(dcr
->subsystem_revision_id
));
1046 static DEVICE_ATTR_RO(subsystem_rev_id
);
1048 static ssize_t
subsystem_device_show(struct device
*dev
,
1049 struct device_attribute
*attr
, char *buf
)
1051 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1053 return sprintf(buf
, "0x%04x\n", be16_to_cpu(dcr
->subsystem_device_id
));
1055 static DEVICE_ATTR_RO(subsystem_device
);
1057 static int num_nvdimm_formats(struct nvdimm
*nvdimm
)
1059 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1062 if (nfit_mem
->memdev_pmem
)
1064 if (nfit_mem
->memdev_bdw
)
1069 static ssize_t
format_show(struct device
*dev
,
1070 struct device_attribute
*attr
, char *buf
)
1072 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1074 return sprintf(buf
, "0x%04x\n", le16_to_cpu(dcr
->code
));
1076 static DEVICE_ATTR_RO(format
);
1078 static ssize_t
format1_show(struct device
*dev
,
1079 struct device_attribute
*attr
, char *buf
)
1082 ssize_t rc
= -ENXIO
;
1083 struct nfit_mem
*nfit_mem
;
1084 struct nfit_memdev
*nfit_memdev
;
1085 struct acpi_nfit_desc
*acpi_desc
;
1086 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1087 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1089 nfit_mem
= nvdimm_provider_data(nvdimm
);
1090 acpi_desc
= nfit_mem
->acpi_desc
;
1091 handle
= to_nfit_memdev(dev
)->device_handle
;
1093 /* assumes DIMMs have at most 2 published interface codes */
1094 mutex_lock(&acpi_desc
->init_mutex
);
1095 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
1096 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
1097 struct nfit_dcr
*nfit_dcr
;
1099 if (memdev
->device_handle
!= handle
)
1102 list_for_each_entry(nfit_dcr
, &acpi_desc
->dcrs
, list
) {
1103 if (nfit_dcr
->dcr
->region_index
!= memdev
->region_index
)
1105 if (nfit_dcr
->dcr
->code
== dcr
->code
)
1107 rc
= sprintf(buf
, "0x%04x\n",
1108 le16_to_cpu(nfit_dcr
->dcr
->code
));
1114 mutex_unlock(&acpi_desc
->init_mutex
);
1117 static DEVICE_ATTR_RO(format1
);
1119 static ssize_t
formats_show(struct device
*dev
,
1120 struct device_attribute
*attr
, char *buf
)
1122 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1124 return sprintf(buf
, "%d\n", num_nvdimm_formats(nvdimm
));
1126 static DEVICE_ATTR_RO(formats
);
1128 static ssize_t
serial_show(struct device
*dev
,
1129 struct device_attribute
*attr
, char *buf
)
1131 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1133 return sprintf(buf
, "0x%08x\n", be32_to_cpu(dcr
->serial_number
));
1135 static DEVICE_ATTR_RO(serial
);
1137 static ssize_t
family_show(struct device
*dev
,
1138 struct device_attribute
*attr
, char *buf
)
1140 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1141 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1143 if (nfit_mem
->family
< 0)
1145 return sprintf(buf
, "%d\n", nfit_mem
->family
);
1147 static DEVICE_ATTR_RO(family
);
1149 static ssize_t
dsm_mask_show(struct device
*dev
,
1150 struct device_attribute
*attr
, char *buf
)
1152 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1153 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1155 if (nfit_mem
->family
< 0)
1157 return sprintf(buf
, "%#lx\n", nfit_mem
->dsm_mask
);
1159 static DEVICE_ATTR_RO(dsm_mask
);
1161 static ssize_t
flags_show(struct device
*dev
,
1162 struct device_attribute
*attr
, char *buf
)
1164 u16 flags
= to_nfit_memdev(dev
)->flags
;
1166 return sprintf(buf
, "%s%s%s%s%s\n",
1167 flags
& ACPI_NFIT_MEM_SAVE_FAILED
? "save_fail " : "",
1168 flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? "restore_fail " : "",
1169 flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? "flush_fail " : "",
1170 flags
& ACPI_NFIT_MEM_NOT_ARMED
? "not_armed " : "",
1171 flags
& ACPI_NFIT_MEM_HEALTH_OBSERVED
? "smart_event " : "");
1173 static DEVICE_ATTR_RO(flags
);
1175 static ssize_t
id_show(struct device
*dev
,
1176 struct device_attribute
*attr
, char *buf
)
1178 struct acpi_nfit_control_region
*dcr
= to_nfit_dcr(dev
);
1180 if (dcr
->valid_fields
& ACPI_NFIT_CONTROL_MFG_INFO_VALID
)
1181 return sprintf(buf
, "%04x-%02x-%04x-%08x\n",
1182 be16_to_cpu(dcr
->vendor_id
),
1183 dcr
->manufacturing_location
,
1184 be16_to_cpu(dcr
->manufacturing_date
),
1185 be32_to_cpu(dcr
->serial_number
));
1187 return sprintf(buf
, "%04x-%08x\n",
1188 be16_to_cpu(dcr
->vendor_id
),
1189 be32_to_cpu(dcr
->serial_number
));
1191 static DEVICE_ATTR_RO(id
);
1193 static struct attribute
*acpi_nfit_dimm_attributes
[] = {
1194 &dev_attr_handle
.attr
,
1195 &dev_attr_phys_id
.attr
,
1196 &dev_attr_vendor
.attr
,
1197 &dev_attr_device
.attr
,
1198 &dev_attr_rev_id
.attr
,
1199 &dev_attr_subsystem_vendor
.attr
,
1200 &dev_attr_subsystem_device
.attr
,
1201 &dev_attr_subsystem_rev_id
.attr
,
1202 &dev_attr_format
.attr
,
1203 &dev_attr_formats
.attr
,
1204 &dev_attr_format1
.attr
,
1205 &dev_attr_serial
.attr
,
1206 &dev_attr_flags
.attr
,
1208 &dev_attr_family
.attr
,
1209 &dev_attr_dsm_mask
.attr
,
1213 static umode_t
acpi_nfit_dimm_attr_visible(struct kobject
*kobj
,
1214 struct attribute
*a
, int n
)
1216 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
1217 struct nvdimm
*nvdimm
= to_nvdimm(dev
);
1219 if (!to_nfit_dcr(dev
))
1221 if (a
== &dev_attr_format1
.attr
&& num_nvdimm_formats(nvdimm
) <= 1)
1226 static struct attribute_group acpi_nfit_dimm_attribute_group
= {
1228 .attrs
= acpi_nfit_dimm_attributes
,
1229 .is_visible
= acpi_nfit_dimm_attr_visible
,
1232 static const struct attribute_group
*acpi_nfit_dimm_attribute_groups
[] = {
1233 &nvdimm_attribute_group
,
1234 &nd_device_attribute_group
,
1235 &acpi_nfit_dimm_attribute_group
,
1239 static struct nvdimm
*acpi_nfit_dimm_by_handle(struct acpi_nfit_desc
*acpi_desc
,
1242 struct nfit_mem
*nfit_mem
;
1244 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
)
1245 if (__to_nfit_memdev(nfit_mem
)->device_handle
== device_handle
)
1246 return nfit_mem
->nvdimm
;
1251 static int acpi_nfit_add_dimm(struct acpi_nfit_desc
*acpi_desc
,
1252 struct nfit_mem
*nfit_mem
, u32 device_handle
)
1254 struct acpi_device
*adev
, *adev_dimm
;
1255 struct device
*dev
= acpi_desc
->dev
;
1256 unsigned long dsm_mask
;
1260 /* nfit test assumes 1:1 relationship between commands and dsms */
1261 nfit_mem
->dsm_mask
= acpi_desc
->dimm_cmd_force_en
;
1262 nfit_mem
->family
= NVDIMM_FAMILY_INTEL
;
1263 adev
= to_acpi_dev(acpi_desc
);
1267 adev_dimm
= acpi_find_child_device(adev
, device_handle
, false);
1268 nfit_mem
->adev
= adev_dimm
;
1270 dev_err(dev
, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1272 return force_enable_dimms
? 0 : -ENODEV
;
	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
1280 for (i
= NVDIMM_FAMILY_INTEL
; i
<= NVDIMM_FAMILY_MSFT
; i
++)
1281 if (acpi_check_dsm(adev_dimm
->handle
, to_nfit_uuid(i
), 1, 1))
1284 /* limit the supported commands to those that are publicly documented */
1285 nfit_mem
->family
= i
;
1286 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
) {
1288 if (disable_vendor_specific
)
1289 dsm_mask
&= ~(1 << ND_CMD_VENDOR
);
1290 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE1
) {
1291 dsm_mask
= 0x1c3c76;
1292 } else if (nfit_mem
->family
== NVDIMM_FAMILY_HPE2
) {
1294 if (disable_vendor_specific
)
1295 dsm_mask
&= ~(1 << 8);
1296 } else if (nfit_mem
->family
== NVDIMM_FAMILY_MSFT
) {
1297 dsm_mask
= 0xffffffff;
1299 dev_dbg(dev
, "unknown dimm command family\n");
1300 nfit_mem
->family
= -1;
1301 /* DSMs are optional, continue loading the driver... */
1305 uuid
= to_nfit_uuid(nfit_mem
->family
);
1306 for_each_set_bit(i
, &dsm_mask
, BITS_PER_LONG
)
1307 if (acpi_check_dsm(adev_dimm
->handle
, uuid
, 1, 1ULL << i
))
1308 set_bit(i
, &nfit_mem
->dsm_mask
);
1313 static int acpi_nfit_register_dimms(struct acpi_nfit_desc
*acpi_desc
)
1315 struct nfit_mem
*nfit_mem
;
1318 list_for_each_entry(nfit_mem
, &acpi_desc
->dimms
, list
) {
1319 struct acpi_nfit_flush_address
*flush
;
1320 unsigned long flags
= 0, cmd_mask
;
1321 struct nvdimm
*nvdimm
;
1326 device_handle
= __to_nfit_memdev(nfit_mem
)->device_handle
;
1327 nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
, device_handle
);
1333 if (nfit_mem
->bdw
&& nfit_mem
->memdev_pmem
)
1334 flags
|= NDD_ALIASING
;
1336 mem_flags
= __to_nfit_memdev(nfit_mem
)->flags
;
1337 if (mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
)
1338 flags
|= NDD_UNARMED
;
1340 rc
= acpi_nfit_add_dimm(acpi_desc
, nfit_mem
, device_handle
);
		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
1349 cmd_mask
= 1UL << ND_CMD_CALL
;
1350 if (nfit_mem
->family
== NVDIMM_FAMILY_INTEL
)
1351 cmd_mask
|= nfit_mem
->dsm_mask
;
1353 flush
= nfit_mem
->nfit_flush
? nfit_mem
->nfit_flush
->flush
1355 nvdimm
= nvdimm_create(acpi_desc
->nvdimm_bus
, nfit_mem
,
1356 acpi_nfit_dimm_attribute_groups
,
1357 flags
, cmd_mask
, flush
? flush
->hint_count
: 0,
1358 nfit_mem
->flush_wpq
);
1362 nfit_mem
->nvdimm
= nvdimm
;
1365 if ((mem_flags
& ACPI_NFIT_MEM_FAILED_MASK
) == 0)
1368 dev_info(acpi_desc
->dev
, "%s flags:%s%s%s%s\n",
1369 nvdimm_name(nvdimm
),
1370 mem_flags
& ACPI_NFIT_MEM_SAVE_FAILED
? " save_fail" : "",
1371 mem_flags
& ACPI_NFIT_MEM_RESTORE_FAILED
? " restore_fail":"",
1372 mem_flags
& ACPI_NFIT_MEM_FLUSH_FAILED
? " flush_fail" : "",
1373 mem_flags
& ACPI_NFIT_MEM_NOT_ARMED
? " not_armed" : "");
1377 return nvdimm_bus_check_dimm_count(acpi_desc
->nvdimm_bus
, dimm_count
);
1380 static void acpi_nfit_init_dsms(struct acpi_nfit_desc
*acpi_desc
)
1382 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1383 const u8
*uuid
= to_nfit_uuid(NFIT_DEV_BUS
);
1384 struct acpi_device
*adev
;
1387 nd_desc
->cmd_mask
= acpi_desc
->bus_cmd_force_en
;
1388 adev
= to_acpi_dev(acpi_desc
);
1392 for (i
= ND_CMD_ARS_CAP
; i
<= ND_CMD_CLEAR_ERROR
; i
++)
1393 if (acpi_check_dsm(adev
->handle
, uuid
, 1, 1ULL << i
))
1394 set_bit(i
, &nd_desc
->cmd_mask
);
1397 static ssize_t
range_index_show(struct device
*dev
,
1398 struct device_attribute
*attr
, char *buf
)
1400 struct nd_region
*nd_region
= to_nd_region(dev
);
1401 struct nfit_spa
*nfit_spa
= nd_region_provider_data(nd_region
);
1403 return sprintf(buf
, "%d\n", nfit_spa
->spa
->range_index
);
1405 static DEVICE_ATTR_RO(range_index
);
1407 static struct attribute
*acpi_nfit_region_attributes
[] = {
1408 &dev_attr_range_index
.attr
,
1412 static struct attribute_group acpi_nfit_region_attribute_group
= {
1414 .attrs
= acpi_nfit_region_attributes
,
1417 static const struct attribute_group
*acpi_nfit_region_attribute_groups
[] = {
1418 &nd_region_attribute_group
,
1419 &nd_mapping_attribute_group
,
1420 &nd_device_attribute_group
,
1421 &nd_numa_attribute_group
,
1422 &acpi_nfit_region_attribute_group
,
1426 /* enough info to uniquely specify an interleave set */
1427 struct nfit_set_info
{
1428 struct nfit_set_info_map
{
1435 static size_t sizeof_nfit_set_info(int num_mappings
)
1437 return sizeof(struct nfit_set_info
)
1438 + num_mappings
* sizeof(struct nfit_set_info_map
);
1441 static int cmp_map(const void *m0
, const void *m1
)
1443 const struct nfit_set_info_map
*map0
= m0
;
1444 const struct nfit_set_info_map
*map1
= m1
;
1446 return memcmp(&map0
->region_offset
, &map1
->region_offset
,
1450 /* Retrieve the nth entry referencing this spa */
1451 static struct acpi_nfit_memory_map
*memdev_from_spa(
1452 struct acpi_nfit_desc
*acpi_desc
, u16 range_index
, int n
)
1454 struct nfit_memdev
*nfit_memdev
;
1456 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
)
1457 if (nfit_memdev
->memdev
->range_index
== range_index
)
1459 return nfit_memdev
->memdev
;
1463 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc
*acpi_desc
,
1464 struct nd_region_desc
*ndr_desc
,
1465 struct acpi_nfit_system_address
*spa
)
1467 int i
, spa_type
= nfit_spa_type(spa
);
1468 struct device
*dev
= acpi_desc
->dev
;
1469 struct nd_interleave_set
*nd_set
;
1470 u16 nr
= ndr_desc
->num_mappings
;
1471 struct nfit_set_info
*info
;
1473 if (spa_type
== NFIT_SPA_PM
|| spa_type
== NFIT_SPA_VOLATILE
)
1478 nd_set
= devm_kzalloc(dev
, sizeof(*nd_set
), GFP_KERNEL
);
1482 info
= devm_kzalloc(dev
, sizeof_nfit_set_info(nr
), GFP_KERNEL
);
1485 for (i
= 0; i
< nr
; i
++) {
1486 struct nd_mapping
*nd_mapping
= &ndr_desc
->nd_mapping
[i
];
1487 struct nfit_set_info_map
*map
= &info
->mapping
[i
];
1488 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
1489 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1490 struct acpi_nfit_memory_map
*memdev
= memdev_from_spa(acpi_desc
,
1491 spa
->range_index
, i
);
1493 if (!memdev
|| !nfit_mem
->dcr
) {
1494 dev_err(dev
, "%s: failed to find DCR\n", __func__
);
1498 map
->region_offset
= memdev
->region_offset
;
1499 map
->serial_number
= nfit_mem
->dcr
->serial_number
;
1502 sort(&info
->mapping
[0], nr
, sizeof(struct nfit_set_info_map
),
1504 nd_set
->cookie
= nd_fletcher64(info
, sizeof_nfit_set_info(nr
), 0);
1505 ndr_desc
->nd_set
= nd_set
;
1506 devm_kfree(dev
, info
);
1511 static u64
to_interleave_offset(u64 offset
, struct nfit_blk_mmio
*mmio
)
1513 struct acpi_nfit_interleave
*idt
= mmio
->idt
;
1514 u32 sub_line_offset
, line_index
, line_offset
;
1515 u64 line_no
, table_skip_count
, table_offset
;
1517 line_no
= div_u64_rem(offset
, mmio
->line_size
, &sub_line_offset
);
1518 table_skip_count
= div_u64_rem(line_no
, mmio
->num_lines
, &line_index
);
1519 line_offset
= idt
->line_offset
[line_index
]
1521 table_offset
= table_skip_count
* mmio
->table_size
;
1523 return mmio
->base_offset
+ line_offset
+ table_offset
+ sub_line_offset
;
1526 static u32
read_blk_stat(struct nfit_blk
*nfit_blk
, unsigned int bw
)
1528 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[DCR
];
1529 u64 offset
= nfit_blk
->stat_offset
+ mmio
->size
* bw
;
1530 const u32 STATUS_MASK
= 0x80000037;
1532 if (mmio
->num_lines
)
1533 offset
= to_interleave_offset(offset
, mmio
);
1535 return readl(mmio
->addr
.base
+ offset
) & STATUS_MASK
;
1538 static void write_blk_ctl(struct nfit_blk
*nfit_blk
, unsigned int bw
,
1539 resource_size_t dpa
, unsigned int len
, unsigned int write
)
1542 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[DCR
];
1545 BCW_OFFSET_MASK
= (1ULL << 48)-1,
1547 BCW_LEN_MASK
= (1ULL << 8) - 1,
1551 cmd
= (dpa
>> L1_CACHE_SHIFT
) & BCW_OFFSET_MASK
;
1552 len
= len
>> L1_CACHE_SHIFT
;
1553 cmd
|= ((u64
) len
& BCW_LEN_MASK
) << BCW_LEN_SHIFT
;
1554 cmd
|= ((u64
) write
) << BCW_CMD_SHIFT
;
1556 offset
= nfit_blk
->cmd_offset
+ mmio
->size
* bw
;
1557 if (mmio
->num_lines
)
1558 offset
= to_interleave_offset(offset
, mmio
);
1560 writeq(cmd
, mmio
->addr
.base
+ offset
);
1561 nvdimm_flush(nfit_blk
->nd_region
);
1563 if (nfit_blk
->dimm_flags
& NFIT_BLK_DCR_LATCH
)
1564 readq(mmio
->addr
.base
+ offset
);
1567 static int acpi_nfit_blk_single_io(struct nfit_blk
*nfit_blk
,
1568 resource_size_t dpa
, void *iobuf
, size_t len
, int rw
,
1571 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[BDW
];
1572 unsigned int copied
= 0;
1576 base_offset
= nfit_blk
->bdw_offset
+ dpa
% L1_CACHE_BYTES
1577 + lane
* mmio
->size
;
1578 write_blk_ctl(nfit_blk
, lane
, dpa
, len
, rw
);
1583 if (mmio
->num_lines
) {
1586 offset
= to_interleave_offset(base_offset
+ copied
,
1588 div_u64_rem(offset
, mmio
->line_size
, &line_offset
);
1589 c
= min_t(size_t, len
, mmio
->line_size
- line_offset
);
1591 offset
= base_offset
+ nfit_blk
->bdw_offset
;
1596 memcpy_to_pmem(mmio
->addr
.aperture
+ offset
,
1599 if (nfit_blk
->dimm_flags
& NFIT_BLK_READ_FLUSH
)
1600 mmio_flush_range((void __force
*)
1601 mmio
->addr
.aperture
+ offset
, c
);
1603 memcpy_from_pmem(iobuf
+ copied
,
1604 mmio
->addr
.aperture
+ offset
, c
);
1612 nvdimm_flush(nfit_blk
->nd_region
);
1614 rc
= read_blk_stat(nfit_blk
, lane
) ? -EIO
: 0;
1618 static int acpi_nfit_blk_region_do_io(struct nd_blk_region
*ndbr
,
1619 resource_size_t dpa
, void *iobuf
, u64 len
, int rw
)
1621 struct nfit_blk
*nfit_blk
= nd_blk_region_provider_data(ndbr
);
1622 struct nfit_blk_mmio
*mmio
= &nfit_blk
->mmio
[BDW
];
1623 struct nd_region
*nd_region
= nfit_blk
->nd_region
;
1624 unsigned int lane
, copied
= 0;
1627 lane
= nd_region_acquire_lane(nd_region
);
1629 u64 c
= min(len
, mmio
->size
);
1631 rc
= acpi_nfit_blk_single_io(nfit_blk
, dpa
+ copied
,
1632 iobuf
+ copied
, c
, rw
, lane
);
1639 nd_region_release_lane(nd_region
, lane
);
1644 static int nfit_blk_init_interleave(struct nfit_blk_mmio
*mmio
,
1645 struct acpi_nfit_interleave
*idt
, u16 interleave_ways
)
1648 mmio
->num_lines
= idt
->line_count
;
1649 mmio
->line_size
= idt
->line_size
;
1650 if (interleave_ways
== 0)
1652 mmio
->table_size
= mmio
->num_lines
* interleave_ways
1659 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor
*nd_desc
,
1660 struct nvdimm
*nvdimm
, struct nfit_blk
*nfit_blk
)
1662 struct nd_cmd_dimm_flags flags
;
1665 memset(&flags
, 0, sizeof(flags
));
1666 rc
= nd_desc
->ndctl(nd_desc
, nvdimm
, ND_CMD_DIMM_FLAGS
, &flags
,
1667 sizeof(flags
), NULL
);
1669 if (rc
>= 0 && flags
.status
== 0)
1670 nfit_blk
->dimm_flags
= flags
.flags
;
1671 else if (rc
== -ENOTTY
) {
1672 /* fall back to a conservative default */
1673 nfit_blk
->dimm_flags
= NFIT_BLK_DCR_LATCH
| NFIT_BLK_READ_FLUSH
;
1681 static int acpi_nfit_blk_region_enable(struct nvdimm_bus
*nvdimm_bus
,
1684 struct nvdimm_bus_descriptor
*nd_desc
= to_nd_desc(nvdimm_bus
);
1685 struct nd_blk_region
*ndbr
= to_nd_blk_region(dev
);
1686 struct nfit_blk_mmio
*mmio
;
1687 struct nfit_blk
*nfit_blk
;
1688 struct nfit_mem
*nfit_mem
;
1689 struct nvdimm
*nvdimm
;
1692 nvdimm
= nd_blk_region_to_dimm(ndbr
);
1693 nfit_mem
= nvdimm_provider_data(nvdimm
);
1694 if (!nfit_mem
|| !nfit_mem
->dcr
|| !nfit_mem
->bdw
) {
1695 dev_dbg(dev
, "%s: missing%s%s%s\n", __func__
,
1696 nfit_mem
? "" : " nfit_mem",
1697 (nfit_mem
&& nfit_mem
->dcr
) ? "" : " dcr",
1698 (nfit_mem
&& nfit_mem
->bdw
) ? "" : " bdw");
1702 nfit_blk
= devm_kzalloc(dev
, sizeof(*nfit_blk
), GFP_KERNEL
);
1705 nd_blk_region_set_provider_data(ndbr
, nfit_blk
);
1706 nfit_blk
->nd_region
= to_nd_region(dev
);
1708 /* map block aperture memory */
1709 nfit_blk
->bdw_offset
= nfit_mem
->bdw
->offset
;
1710 mmio
= &nfit_blk
->mmio
[BDW
];
1711 mmio
->addr
.base
= devm_nvdimm_memremap(dev
, nfit_mem
->spa_bdw
->address
,
1712 nfit_mem
->spa_bdw
->length
, ARCH_MEMREMAP_PMEM
);
1713 if (!mmio
->addr
.base
) {
1714 dev_dbg(dev
, "%s: %s failed to map bdw\n", __func__
,
1715 nvdimm_name(nvdimm
));
1718 mmio
->size
= nfit_mem
->bdw
->size
;
1719 mmio
->base_offset
= nfit_mem
->memdev_bdw
->region_offset
;
1720 mmio
->idt
= nfit_mem
->idt_bdw
;
1721 mmio
->spa
= nfit_mem
->spa_bdw
;
1722 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_bdw
,
1723 nfit_mem
->memdev_bdw
->interleave_ways
);
1725 dev_dbg(dev
, "%s: %s failed to init bdw interleave\n",
1726 __func__
, nvdimm_name(nvdimm
));
1730 /* map block control memory */
1731 nfit_blk
->cmd_offset
= nfit_mem
->dcr
->command_offset
;
1732 nfit_blk
->stat_offset
= nfit_mem
->dcr
->status_offset
;
1733 mmio
= &nfit_blk
->mmio
[DCR
];
1734 mmio
->addr
.base
= devm_nvdimm_ioremap(dev
, nfit_mem
->spa_dcr
->address
,
1735 nfit_mem
->spa_dcr
->length
);
1736 if (!mmio
->addr
.base
) {
1737 dev_dbg(dev
, "%s: %s failed to map dcr\n", __func__
,
1738 nvdimm_name(nvdimm
));
1741 mmio
->size
= nfit_mem
->dcr
->window_size
;
1742 mmio
->base_offset
= nfit_mem
->memdev_dcr
->region_offset
;
1743 mmio
->idt
= nfit_mem
->idt_dcr
;
1744 mmio
->spa
= nfit_mem
->spa_dcr
;
1745 rc
= nfit_blk_init_interleave(mmio
, nfit_mem
->idt_dcr
,
1746 nfit_mem
->memdev_dcr
->interleave_ways
);
1748 dev_dbg(dev
, "%s: %s failed to init dcr interleave\n",
1749 __func__
, nvdimm_name(nvdimm
));
1753 rc
= acpi_nfit_blk_get_flags(nd_desc
, nvdimm
, nfit_blk
);
1755 dev_dbg(dev
, "%s: %s failed get DIMM flags\n",
1756 __func__
, nvdimm_name(nvdimm
));
1760 if (nvdimm_has_flush(nfit_blk
->nd_region
) < 0)
1761 dev_warn(dev
, "unable to guarantee persistence of writes\n");
1763 if (mmio
->line_size
== 0)
1766 if ((u32
) nfit_blk
->cmd_offset
% mmio
->line_size
1767 + 8 > mmio
->line_size
) {
1768 dev_dbg(dev
, "cmd_offset crosses interleave boundary\n");
1770 } else if ((u32
) nfit_blk
->stat_offset
% mmio
->line_size
1771 + 8 > mmio
->line_size
) {
1772 dev_dbg(dev
, "stat_offset crosses interleave boundary\n");
1779 static int ars_get_cap(struct acpi_nfit_desc
*acpi_desc
,
1780 struct nd_cmd_ars_cap
*cmd
, struct nfit_spa
*nfit_spa
)
1782 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1783 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
1786 cmd
->address
= spa
->address
;
1787 cmd
->length
= spa
->length
;
1788 rc
= nd_desc
->ndctl(nd_desc
, NULL
, ND_CMD_ARS_CAP
, cmd
,
1789 sizeof(*cmd
), &cmd_rc
);
1795 static int ars_start(struct acpi_nfit_desc
*acpi_desc
, struct nfit_spa
*nfit_spa
)
1799 struct nd_cmd_ars_start ars_start
;
1800 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
1801 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1803 memset(&ars_start
, 0, sizeof(ars_start
));
1804 ars_start
.address
= spa
->address
;
1805 ars_start
.length
= spa
->length
;
1806 if (nfit_spa_type(spa
) == NFIT_SPA_PM
)
1807 ars_start
.type
= ND_ARS_PERSISTENT
;
1808 else if (nfit_spa_type(spa
) == NFIT_SPA_VOLATILE
)
1809 ars_start
.type
= ND_ARS_VOLATILE
;
1813 rc
= nd_desc
->ndctl(nd_desc
, NULL
, ND_CMD_ARS_START
, &ars_start
,
1814 sizeof(ars_start
), &cmd_rc
);
1821 static int ars_continue(struct acpi_nfit_desc
*acpi_desc
)
1824 struct nd_cmd_ars_start ars_start
;
1825 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1826 struct nd_cmd_ars_status
*ars_status
= acpi_desc
->ars_status
;
1828 memset(&ars_start
, 0, sizeof(ars_start
));
1829 ars_start
.address
= ars_status
->restart_address
;
1830 ars_start
.length
= ars_status
->restart_length
;
1831 ars_start
.type
= ars_status
->type
;
1832 rc
= nd_desc
->ndctl(nd_desc
, NULL
, ND_CMD_ARS_START
, &ars_start
,
1833 sizeof(ars_start
), &cmd_rc
);
1839 static int ars_get_status(struct acpi_nfit_desc
*acpi_desc
)
1841 struct nvdimm_bus_descriptor
*nd_desc
= &acpi_desc
->nd_desc
;
1842 struct nd_cmd_ars_status
*ars_status
= acpi_desc
->ars_status
;
1845 rc
= nd_desc
->ndctl(nd_desc
, NULL
, ND_CMD_ARS_STATUS
, ars_status
,
1846 acpi_desc
->ars_status_size
, &cmd_rc
);
1852 static int ars_status_process_records(struct nvdimm_bus
*nvdimm_bus
,
1853 struct nd_cmd_ars_status
*ars_status
)
1858 for (i
= 0; i
< ars_status
->num_records
; i
++) {
1859 rc
= nvdimm_bus_add_poison(nvdimm_bus
,
1860 ars_status
->records
[i
].err_address
,
1861 ars_status
->records
[i
].length
);
1869 static void acpi_nfit_remove_resource(void *data
)
1871 struct resource
*res
= data
;
1873 remove_resource(res
);
1876 static int acpi_nfit_insert_resource(struct acpi_nfit_desc
*acpi_desc
,
1877 struct nd_region_desc
*ndr_desc
)
1879 struct resource
*res
, *nd_res
= ndr_desc
->res
;
1882 /* No operation if the region is already registered as PMEM */
1883 is_pmem
= region_intersects(nd_res
->start
, resource_size(nd_res
),
1884 IORESOURCE_MEM
, IORES_DESC_PERSISTENT_MEMORY
);
1885 if (is_pmem
== REGION_INTERSECTS
)
1888 res
= devm_kzalloc(acpi_desc
->dev
, sizeof(*res
), GFP_KERNEL
);
1892 res
->name
= "Persistent Memory";
1893 res
->start
= nd_res
->start
;
1894 res
->end
= nd_res
->end
;
1895 res
->flags
= IORESOURCE_MEM
;
1896 res
->desc
= IORES_DESC_PERSISTENT_MEMORY
;
1898 ret
= insert_resource(&iomem_resource
, res
);
1902 ret
= devm_add_action_or_reset(acpi_desc
->dev
,
1903 acpi_nfit_remove_resource
,
1911 static int acpi_nfit_init_mapping(struct acpi_nfit_desc
*acpi_desc
,
1912 struct nd_mapping
*nd_mapping
, struct nd_region_desc
*ndr_desc
,
1913 struct acpi_nfit_memory_map
*memdev
,
1914 struct nfit_spa
*nfit_spa
)
1916 struct nvdimm
*nvdimm
= acpi_nfit_dimm_by_handle(acpi_desc
,
1917 memdev
->device_handle
);
1918 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
1919 struct nd_blk_region_desc
*ndbr_desc
;
1920 struct nfit_mem
*nfit_mem
;
1924 dev_err(acpi_desc
->dev
, "spa%d dimm: %#x not found\n",
1925 spa
->range_index
, memdev
->device_handle
);
1929 nd_mapping
->nvdimm
= nvdimm
;
1930 switch (nfit_spa_type(spa
)) {
1932 case NFIT_SPA_VOLATILE
:
1933 nd_mapping
->start
= memdev
->address
;
1934 nd_mapping
->size
= memdev
->region_size
;
1937 nfit_mem
= nvdimm_provider_data(nvdimm
);
1938 if (!nfit_mem
|| !nfit_mem
->bdw
) {
1939 dev_dbg(acpi_desc
->dev
, "spa%d %s missing bdw\n",
1940 spa
->range_index
, nvdimm_name(nvdimm
));
1942 nd_mapping
->size
= nfit_mem
->bdw
->capacity
;
1943 nd_mapping
->start
= nfit_mem
->bdw
->start_address
;
1944 ndr_desc
->num_lanes
= nfit_mem
->bdw
->windows
;
1948 ndr_desc
->nd_mapping
= nd_mapping
;
1949 ndr_desc
->num_mappings
= blk_valid
;
1950 ndbr_desc
= to_blk_region_desc(ndr_desc
);
1951 ndbr_desc
->enable
= acpi_nfit_blk_region_enable
;
1952 ndbr_desc
->do_io
= acpi_desc
->blk_do_io
;
1953 nfit_spa
->nd_region
= nvdimm_blk_region_create(acpi_desc
->nvdimm_bus
,
1955 if (!nfit_spa
->nd_region
)
1963 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address
*spa
)
1965 return (nfit_spa_type(spa
) == NFIT_SPA_VDISK
||
1966 nfit_spa_type(spa
) == NFIT_SPA_VCD
||
1967 nfit_spa_type(spa
) == NFIT_SPA_PDISK
||
1968 nfit_spa_type(spa
) == NFIT_SPA_PCD
);
1971 static int acpi_nfit_register_region(struct acpi_nfit_desc
*acpi_desc
,
1972 struct nfit_spa
*nfit_spa
)
1974 static struct nd_mapping nd_mappings
[ND_MAX_MAPPINGS
];
1975 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
1976 struct nd_blk_region_desc ndbr_desc
;
1977 struct nd_region_desc
*ndr_desc
;
1978 struct nfit_memdev
*nfit_memdev
;
1979 struct nvdimm_bus
*nvdimm_bus
;
1980 struct resource res
;
1983 if (nfit_spa
->nd_region
)
1986 if (spa
->range_index
== 0 && !nfit_spa_is_virtual(spa
)) {
1987 dev_dbg(acpi_desc
->dev
, "%s: detected invalid spa index\n",
1992 memset(&res
, 0, sizeof(res
));
1993 memset(&nd_mappings
, 0, sizeof(nd_mappings
));
1994 memset(&ndbr_desc
, 0, sizeof(ndbr_desc
));
1995 res
.start
= spa
->address
;
1996 res
.end
= res
.start
+ spa
->length
- 1;
1997 ndr_desc
= &ndbr_desc
.ndr_desc
;
1998 ndr_desc
->res
= &res
;
1999 ndr_desc
->provider_data
= nfit_spa
;
2000 ndr_desc
->attr_groups
= acpi_nfit_region_attribute_groups
;
2001 if (spa
->flags
& ACPI_NFIT_PROXIMITY_VALID
)
2002 ndr_desc
->numa_node
= acpi_map_pxm_to_online_node(
2003 spa
->proximity_domain
);
2005 ndr_desc
->numa_node
= NUMA_NO_NODE
;
2007 list_for_each_entry(nfit_memdev
, &acpi_desc
->memdevs
, list
) {
2008 struct acpi_nfit_memory_map
*memdev
= nfit_memdev
->memdev
;
2009 struct nd_mapping
*nd_mapping
;
2011 if (memdev
->range_index
!= spa
->range_index
)
2013 if (count
>= ND_MAX_MAPPINGS
) {
2014 dev_err(acpi_desc
->dev
, "spa%d exceeds max mappings %d\n",
2015 spa
->range_index
, ND_MAX_MAPPINGS
);
2018 nd_mapping
= &nd_mappings
[count
++];
2019 rc
= acpi_nfit_init_mapping(acpi_desc
, nd_mapping
, ndr_desc
,
2025 ndr_desc
->nd_mapping
= nd_mappings
;
2026 ndr_desc
->num_mappings
= count
;
2027 rc
= acpi_nfit_init_interleave_set(acpi_desc
, ndr_desc
, spa
);
2031 nvdimm_bus
= acpi_desc
->nvdimm_bus
;
2032 if (nfit_spa_type(spa
) == NFIT_SPA_PM
) {
2033 rc
= acpi_nfit_insert_resource(acpi_desc
, ndr_desc
);
2035 dev_warn(acpi_desc
->dev
,
2036 "failed to insert pmem resource to iomem: %d\n",
2041 nfit_spa
->nd_region
= nvdimm_pmem_region_create(nvdimm_bus
,
2043 if (!nfit_spa
->nd_region
)
2045 } else if (nfit_spa_type(spa
) == NFIT_SPA_VOLATILE
) {
2046 nfit_spa
->nd_region
= nvdimm_volatile_region_create(nvdimm_bus
,
2048 if (!nfit_spa
->nd_region
)
2050 } else if (nfit_spa_is_virtual(spa
)) {
2051 nfit_spa
->nd_region
= nvdimm_pmem_region_create(nvdimm_bus
,
2053 if (!nfit_spa
->nd_region
)
2059 dev_err(acpi_desc
->dev
, "failed to register spa range %d\n",
2060 nfit_spa
->spa
->range_index
);
2064 static int ars_status_alloc(struct acpi_nfit_desc
*acpi_desc
,
2067 struct device
*dev
= acpi_desc
->dev
;
2068 struct nd_cmd_ars_status
*ars_status
;
2070 if (acpi_desc
->ars_status
&& acpi_desc
->ars_status_size
>= max_ars
) {
2071 memset(acpi_desc
->ars_status
, 0, acpi_desc
->ars_status_size
);
2075 if (acpi_desc
->ars_status
)
2076 devm_kfree(dev
, acpi_desc
->ars_status
);
2077 acpi_desc
->ars_status
= NULL
;
2078 ars_status
= devm_kzalloc(dev
, max_ars
, GFP_KERNEL
);
2081 acpi_desc
->ars_status
= ars_status
;
2082 acpi_desc
->ars_status_size
= max_ars
;
2086 static int acpi_nfit_query_poison(struct acpi_nfit_desc
*acpi_desc
,
2087 struct nfit_spa
*nfit_spa
)
2089 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
2092 if (!nfit_spa
->max_ars
) {
2093 struct nd_cmd_ars_cap ars_cap
;
2095 memset(&ars_cap
, 0, sizeof(ars_cap
));
2096 rc
= ars_get_cap(acpi_desc
, &ars_cap
, nfit_spa
);
2099 nfit_spa
->max_ars
= ars_cap
.max_ars_out
;
2100 nfit_spa
->clear_err_unit
= ars_cap
.clear_err_unit
;
2101 /* check that the supported scrub types match the spa type */
2102 if (nfit_spa_type(spa
) == NFIT_SPA_VOLATILE
&&
2103 ((ars_cap
.status
>> 16) & ND_ARS_VOLATILE
) == 0)
2105 else if (nfit_spa_type(spa
) == NFIT_SPA_PM
&&
2106 ((ars_cap
.status
>> 16) & ND_ARS_PERSISTENT
) == 0)
2110 if (ars_status_alloc(acpi_desc
, nfit_spa
->max_ars
))
2113 rc
= ars_get_status(acpi_desc
);
2114 if (rc
< 0 && rc
!= -ENOSPC
)
2117 if (ars_status_process_records(acpi_desc
->nvdimm_bus
,
2118 acpi_desc
->ars_status
))
2124 static void acpi_nfit_async_scrub(struct acpi_nfit_desc
*acpi_desc
,
2125 struct nfit_spa
*nfit_spa
)
2127 struct acpi_nfit_system_address
*spa
= nfit_spa
->spa
;
2128 unsigned int overflow_retry
= scrub_overflow_abort
;
2129 u64 init_ars_start
= 0, init_ars_len
= 0;
2130 struct device
*dev
= acpi_desc
->dev
;
2131 unsigned int tmo
= scrub_timeout
;
2134 if (!nfit_spa
->ars_required
|| !nfit_spa
->nd_region
)
2137 rc
= ars_start(acpi_desc
, nfit_spa
);
2139 * If we timed out the initial scan we'll still be busy here,
2140 * and will wait another timeout before giving up permanently.
2142 if (rc
< 0 && rc
!= -EBUSY
)
2146 u64 ars_start
, ars_len
;
2148 if (acpi_desc
->cancel
)
2150 rc
= acpi_nfit_query_poison(acpi_desc
, nfit_spa
);
2153 if (rc
== -EBUSY
&& !tmo
) {
2154 dev_warn(dev
, "range %d ars timeout, aborting\n",
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * added.
			 */
2166 mutex_unlock(&acpi_desc
->init_mutex
);
2169 mutex_lock(&acpi_desc
->init_mutex
);
2173 /* we got some results, but there are more pending... */
2174 if (rc
== -ENOSPC
&& overflow_retry
--) {
2175 if (!init_ars_len
) {
2176 init_ars_len
= acpi_desc
->ars_status
->length
;
2177 init_ars_start
= acpi_desc
->ars_status
->address
;
2179 rc
= ars_continue(acpi_desc
);
2183 dev_warn(dev
, "range %d ars continuation failed\n",
2189 ars_start
= init_ars_start
;
2190 ars_len
= init_ars_len
;
2192 ars_start
= acpi_desc
->ars_status
->address
;
2193 ars_len
= acpi_desc
->ars_status
->length
;
2195 dev_dbg(dev
, "spa range: %d ars from %#llx + %#llx complete\n",
2196 spa
->range_index
, ars_start
, ars_len
);
2197 /* notify the region about new poison entries */
2198 nvdimm_region_notify(nfit_spa
->nd_region
,
2199 NVDIMM_REVALIDATE_POISON
);
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.  If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}
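
/*
 * BLK (DCR) regions do not depend on scrub results and are registered
 * immediately; all other ranges are handed to the scrub workqueue so
 * they surface with an initial badblocks list already populated.
 */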
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}

	return 0;
}
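
/*
 * Cache the kernfs node backing the bus-level 'scrub' attribute so
 * scrub completions can be signalled via sysfs_notify_dirent().
 */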
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}
static void acpi_nfit_destruct(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	acpi_desc->cancel = 1;
	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
	if (acpi_desc->scrub_count_state)
		sysfs_put(acpi_desc->scrub_count_state);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	acpi_desc->nvdimm_bus = NULL;
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);
}
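
/*
 * Parse an NFIT (or an updated _FIT) payload.  Tables seen on a prior
 * pass are moved to 'prev' lists and reconciled against the new data,
 * so repeated calls may only add entries, never delete them.
 */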
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
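
/*
 * flush_probe: queue a no-op work item behind everything already on
 * nfit_wq and wait for it, so a probing driver observes the results of
 * any in-flight NFIT registration or scrub work.
 */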
struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}
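
/*
 * Userspace-initiated ARS starts are rejected while the in-kernel
 * scrub worker is running so its scrub cannot be interrupted.
 */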
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	if (acpi_desc->cancel)
		return 0;

	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
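
/*
 * One-time setup of the descriptor: wire up the nvdimm bus callbacks
 * and initialize the lists, mutex, and work used for NFIT parsing and
 * scrubbing.
 */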
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));
	return rc;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_destruct */
	return 0;
}
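
/*
 * Hotplug notification: re-evaluate _FIT and merge any newly reported
 * tables into the existing NFIT description.
 */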
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &adev->dev;
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			goto out_unlock;
		acpi_nfit_desc_init(acpi_desc, &adev->dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
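
/*
 * Validate that the ACPICA structure definitions match the fixed
 * sub-table sizes from the NFIT specification before registering the
 * driver.
 */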
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_flush_address) != 16);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");