/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static struct workqueue_struct *nfit_wq;
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset + in_buf.buffer.length);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);
static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "%#x\n",
					be16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);
static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	/*
	 * Until standardization materializes we need to consider up to 3
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
		dsm_mask = 0x1c3c76;
	else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else {
		dev_err(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		return force_enable_dimms ? 0 : -ENODEV;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		unsigned long flags = 0, cmd_mask;
		struct nvdimm *nvdimm;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}
static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
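/*
 * Illustrative example of the translation above (numbers are made up,
 * not taken from any specific platform): with line_size = 256,
 * num_lines = 2 and line_offset[] = { 0, 4 }, a region offset of 600
 * splits into line_no = 2 and sub_line_offset = 88, then line_index =
 * 0 and table_skip_count = 1, giving base_offset + 0 * 256 +
 * 1 * table_size + 88.
 */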
static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
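/*
 * Shape of the block-control-word assembled above, as implied by the
 * masks and shifts (a summary of the code, not quoted spec text): the
 * low 48 bits carry the DPA in cache-line units, the next 8 bits carry
 * the transfer length in cache-line units, and the write/read flag
 * sits above the length field.
 */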
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}
static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}
/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @nvdimm_bus: NFIT-bus that provided the spa table entry
 * @nfit_spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status)
{
	int rc;
	u32 i;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
	if (ret) {
		remove_resource(res);
		return ret;
	}

	return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
{
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        unsigned int overflow_retry = scrub_overflow_abort;
        u64 init_ars_start = 0, init_ars_len = 0;
        struct device *dev = acpi_desc->dev;
        unsigned int tmo = scrub_timeout;
        int rc;

        if (nfit_spa->ars_done || !nfit_spa->nd_region)
                return;

        rc = ars_start(acpi_desc, nfit_spa);
        /*
         * If we timed out the initial scan we'll still be busy here,
         * and will wait another timeout before giving up permanently.
         */
        if (rc < 0 && rc != -EBUSY)
                return;

        do {
                u64 ars_start, ars_len;

                if (acpi_desc->cancel)
                        break;
                rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
                if (rc == -ENOTTY)
                        break;
                if (rc == -EBUSY && !tmo) {
                        dev_warn(dev, "range %d ars timeout, aborting\n",
                                        spa->range_index);
                        break;
                }

                if (rc == -EBUSY) {
                        /*
                         * Note, entries may be appended to the list
                         * while the lock is dropped, but the workqueue
                         * being active prevents entries being deleted /
                         * freed.
                         */
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        mutex_lock(&acpi_desc->init_mutex);
                        continue;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        if (!init_ars_len) {
                                init_ars_len = acpi_desc->ars_status->length;
                                init_ars_start = acpi_desc->ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                }

                if (rc < 0) {
                        dev_warn(dev, "range %d ars continuation failed\n",
                                        spa->range_index);
                        break;
                }

                if (init_ars_len) {
                        ars_start = init_ars_start;
                        ars_len = init_ars_len;
                } else {
                        ars_start = acpi_desc->ars_status->address;
                        ars_len = acpi_desc->ars_status->length;
                }
                dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
                                spa->range_index, ars_start, ars_len);
                /* notify the region about new poison entries */
                nvdimm_region_notify(nfit_spa->nd_region,
                                NVDIMM_REVALIDATE_POISON);
                break;
        } while (1);
}
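
/*
 * Workqueue callback (queued by acpi_nfit_register_regions()) that
 * registers regions and reconciles ARS results in the two phases
 * described below.
 */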
static void acpi_nfit_scrub(struct work_struct *work)
{
        struct device *dev;
        u64 init_scrub_length = 0;
        struct nfit_spa *nfit_spa;
        u64 init_scrub_address = 0;
        bool init_ars_done = false;
        struct acpi_nfit_desc *acpi_desc;
        unsigned int tmo = scrub_timeout;
        unsigned int overflow_retry = scrub_overflow_abort;

        acpi_desc = container_of(work, typeof(*acpi_desc), work);
        dev = acpi_desc->dev;

        /*
         * We scrub in 2 phases.  The first phase waits for any platform
         * firmware initiated scrubs to complete and then we go search for the
         * affected spa regions to mark them scanned.  In the second phase we
         * initiate a directed scrub for every range that was not scrubbed in
         * phase 1.
         */

        /* process platform firmware initiated scrubs */
 retry:
        mutex_lock(&acpi_desc->init_mutex);
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                struct nd_cmd_ars_status *ars_status;
                struct acpi_nfit_system_address *spa;
                u64 ars_start, ars_len;
                int rc;

                if (acpi_desc->cancel)
                        break;

                if (nfit_spa->nd_region)
                        continue;

                if (init_ars_done) {
                        /*
                         * No need to re-query, we're now just
                         * reconciling all the ranges covered by the
                         * initial scrub
                         */
                        rc = 0;
                } else
                        rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

                if (rc == -ENOTTY) {
                        /* no ars capability, just register spa and move on */
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
                        continue;
                }

                if (rc == -EBUSY && !tmo) {
                        /* fallthrough to directed scrub in phase 2 */
                        dev_warn(dev, "timeout awaiting ars results, continuing...\n");
                        break;
                } else if (rc == -EBUSY) {
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        goto retry;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        ars_status = acpi_desc->ars_status;
                        /*
                         * Record the original scrub range, so that we
                         * can recall all the ranges impacted by the
                         * initial scrub.
                         */
                        if (!init_scrub_length) {
                                init_scrub_length = ars_status->length;
                                init_scrub_address = ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                        if (rc == 0) {
                                mutex_unlock(&acpi_desc->init_mutex);
                                goto retry;
                        }
                }

                if (rc < 0) {
                        /*
                         * Initial scrub failed, we'll give it one more
                         * try below...
                         */
                        break;
                }

                /* We got some final results, record completed ranges */
                ars_status = acpi_desc->ars_status;
                if (init_scrub_length) {
                        ars_start = init_scrub_address;
                        ars_len = ars_start + init_scrub_length;
                } else {
                        ars_start = ars_status->address;
                        ars_len = ars_status->length;
                }
                spa = nfit_spa->spa;

                if (!init_ars_done) {
                        init_ars_done = true;
                        dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
                                        ars_start, ars_len);
                }
                if (ars_start <= spa->address && ars_start + ars_len
                                >= spa->address + spa->length)
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
        }

        /*
         * For all the ranges not covered by an initial scrub we still
         * want to see if there are errors, but it's ok to discover them
         * asynchronously.
         */
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                /*
                 * Flag all the ranges that still need scrubbing, but
                 * register them now to make data available.
                 */
                if (nfit_spa->nd_region)
                        nfit_spa->ars_done = 1;
                else
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
        }

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
                acpi_nfit_async_scrub(acpi_desc, nfit_spa);
        mutex_unlock(&acpi_desc->init_mutex);
}
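
/*
 * Register BLK (DCR) regions immediately; PMEM and volatile regions are
 * deferred to the scrub workqueue so ARS results can be reconciled
 * before they go live.
 */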
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
        struct nfit_spa *nfit_spa;
        int rc;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
                if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
                        /* BLK regions don't need to wait for ars results */
                        rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
                        if (rc)
                                return rc;
                }

        queue_work(nfit_wq, &acpi_desc->work);
        return 0;
}
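
/*
 * A re-evaluated NFIT (e.g. from _FIT) may only add entries; anything
 * left over on the @prev lists would amount to a deleted table, which
 * is not supported.
 */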
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev)
{
        struct device *dev = acpi_desc->dev;

        if (!list_empty(&prev->spas) ||
                        !list_empty(&prev->memdevs) ||
                        !list_empty(&prev->dcrs) ||
                        !list_empty(&prev->bdws) ||
                        !list_empty(&prev->idts) ||
                        !list_empty(&prev->flushes)) {
                dev_err(dev, "new nfit deletes entries (unsupported)\n");
                return -ENXIO;
        }
        return 0;
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_table_prev prev;
        const void *end;
        u8 *data;
        int rc;

        mutex_lock(&acpi_desc->init_mutex);

        INIT_LIST_HEAD(&prev.spas);
        INIT_LIST_HEAD(&prev.memdevs);
        INIT_LIST_HEAD(&prev.dcrs);
        INIT_LIST_HEAD(&prev.bdws);
        INIT_LIST_HEAD(&prev.idts);
        INIT_LIST_HEAD(&prev.flushes);

        list_cut_position(&prev.spas, &acpi_desc->spas,
                                acpi_desc->spas.prev);
        list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
                                acpi_desc->memdevs.prev);
        list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
                                acpi_desc->dcrs.prev);
        list_cut_position(&prev.bdws, &acpi_desc->bdws,
                                acpi_desc->bdws.prev);
        list_cut_position(&prev.idts, &acpi_desc->idts,
                                acpi_desc->idts.prev);
        list_cut_position(&prev.flushes, &acpi_desc->flushes,
                                acpi_desc->flushes.prev);

        data = (u8 *) acpi_desc->nfit;
        end = data + sz;
        while (!IS_ERR_OR_NULL(data))
                data = add_table(acpi_desc, &prev, data, end);

        if (IS_ERR(data)) {
                dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
                                PTR_ERR(data));
                rc = PTR_ERR(data);
                goto out_unlock;
        }

        rc = acpi_nfit_check_deletions(acpi_desc, &prev);
        if (rc)
                goto out_unlock;

        if (nfit_mem_init(acpi_desc) != 0) {
                rc = -ENOMEM;
                goto out_unlock;
        }

        acpi_nfit_init_dsms(acpi_desc);

        rc = acpi_nfit_register_dimms(acpi_desc);
        if (rc)
                goto out_unlock;

        rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
        mutex_unlock(&acpi_desc->init_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
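
/*
 * Helpers for the ->flush_probe() callback: queue a no-op work item on
 * nfit_wq and wait for it, so that any previously queued scrub work has
 * completed before a child device probe proceeds.
 */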
struct acpi_nfit_flush_work {
        struct work_struct work;
        struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
        struct acpi_nfit_flush_work *flush;

        flush = container_of(work, typeof(*flush), work);
        complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;

        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
        device_unlock(dev);

        /*
         * Scrub work could take 10s of seconds, userspace may give up so we
         * need to be interruptible while waiting.
         */
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
        queue_work(nfit_wq, &flush.work);
        return wait_for_completion_interruptible(&flush.cmp);
}
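
/*
 * Deny a userspace-initiated ARS start while the kernel's own scrub
 * work is still running; all other commands pass through.
 */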
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

        if (nvdimm)
                return 0;
        if (cmd != ND_CMD_ARS_START)
                return 0;

        /*
         * The kernel and userspace may race to initiate a scrub, but
         * the scrub thread is prepared to lose that initial race.  It
         * just needs guarantees that any ars it initiates are not
         * interrupted by any intervening start requests from userspace.
         */
        if (work_busy(&acpi_desc->work))
                return -EBUSY;

        return 0;
}
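
/**
 * acpi_nfit_desc_init() - prepare an acpi_nfit_desc for first use
 * @acpi_desc: zero-initialized descriptor to set up
 * @dev: parent device for the nvdimm bus
 *
 * Wires up the nvdimm_bus_descriptor callbacks and initializes the
 * table lists, locks, and scrub work item.
 */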
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
        struct nvdimm_bus_descriptor *nd_desc;

        dev_set_drvdata(dev, acpi_desc);
        acpi_desc->dev = dev;
        acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->provider_name = "ACPI.NFIT";
        nd_desc->ndctl = acpi_nfit_ctl;
        nd_desc->flush_probe = acpi_nfit_flush_probe;
        nd_desc->clear_to_send = acpi_nfit_clear_to_send;
        nd_desc->attr_groups = acpi_nfit_attribute_groups;

        INIT_LIST_HEAD(&acpi_desc->spa_maps);
        INIT_LIST_HEAD(&acpi_desc->spas);
        INIT_LIST_HEAD(&acpi_desc->dcrs);
        INIT_LIST_HEAD(&acpi_desc->bdws);
        INIT_LIST_HEAD(&acpi_desc->idts);
        INIT_LIST_HEAD(&acpi_desc->flushes);
        INIT_LIST_HEAD(&acpi_desc->memdevs);
        INIT_LIST_HEAD(&acpi_desc->dimms);
        mutex_init(&acpi_desc->spa_map_mutex);
        mutex_init(&acpi_desc->init_mutex);
        INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
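
/*
 * ACPI ->add() entry point: locate the static NFIT (its absence is not
 * fatal, NVDIMMs may be hot-added later), prefer a _FIT override when
 * one is present, and register the nvdimm bus.
 */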
static int acpi_nfit_add(struct acpi_device *adev)
{
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_nfit_desc *acpi_desc;
        struct device *dev = &adev->dev;
        struct acpi_table_header *tbl;
        acpi_status status = AE_OK;
        acpi_size sz;
        int rc;

        status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
        if (ACPI_FAILURE(status)) {
                /* This is ok, we could have an nvdimm hotplugged later */
                dev_dbg(dev, "failed to find NFIT at startup\n");
                return 0;
        }

        acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
        if (!acpi_desc)
                return -ENOMEM;
        acpi_nfit_desc_init(acpi_desc, &adev->dev);
        acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
        if (!acpi_desc->nvdimm_bus)
                return -ENOMEM;

        /*
         * Save the acpi header for later and then skip it,
         * making nfit point to the first nfit table header.
         */
        acpi_desc->acpi_header = *tbl;
        acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
        sz -= sizeof(struct acpi_table_nfit);

        /* Evaluate _FIT and override with that if present */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_SUCCESS(status) && buf.length > 0) {
                union acpi_object *obj;
                /*
                 * Adjust for the acpi_object header of the _FIT
                 */
                obj = buf.pointer;
                if (obj->type == ACPI_TYPE_BUFFER) {
                        acpi_desc->nfit =
                                (struct acpi_nfit_header *)obj->buffer.pointer;
                        sz = obj->buffer.length;
                } else
                        dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
                                        __func__, (int) obj->type);
        }

        rc = acpi_nfit_init(acpi_desc, sz);
        if (rc) {
                nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
                return rc;
        }
        return 0;
}
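
/* Cancel outstanding scrub work and tear down the nvdimm bus on removal */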
static int acpi_nfit_remove(struct acpi_device *adev)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

        acpi_desc->cancel = 1;
        flush_workqueue(nfit_wq);
        nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
        return 0;
}
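
/*
 * ACPI notification handler (e.g. NVDIMM hot-add): re-evaluate _FIT and
 * merge any newly described tables into the existing bus.
 */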
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_nfit_header *nfit_saved;
        union acpi_object *obj;
        struct device *dev = &adev->dev;
        acpi_status status;
        int ret;

        dev_dbg(dev, "%s: event: %d\n", __func__, event);

        device_lock(dev);
        if (!dev->driver) {
                /* dev->driver may be null if we're being removed */
                dev_dbg(dev, "%s: no driver found for dev\n", __func__);
                goto out_unlock;
        }

        if (!acpi_desc) {
                acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
                if (!acpi_desc)
                        goto out_unlock;
                acpi_nfit_desc_init(acpi_desc, &adev->dev);
                acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
                if (!acpi_desc->nvdimm_bus)
                        goto out_unlock;
        }

        /*
         * Finish previous registration before considering new
         * regions.
         */
        flush_workqueue(nfit_wq);

        /* Evaluate _FIT */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to evaluate _FIT\n");
                goto out_unlock;
        }

        nfit_saved = acpi_desc->nfit;
        obj = buf.pointer;
        if (obj->type == ACPI_TYPE_BUFFER) {
                acpi_desc->nfit =
                        (struct acpi_nfit_header *)obj->buffer.pointer;
                ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
                if (ret) {
                        /* Merge failed, restore old nfit, and exit */
                        acpi_desc->nfit = nfit_saved;
                        dev_err(dev, "failed to merge updated NFIT\n");
                }
        } else {
                /* Bad _FIT, restore old nfit */
                dev_err(dev, "Invalid _FIT\n");
        }

 out_unlock:
        device_unlock(dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
        { "ACPI0012", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
        .name = KBUILD_MODNAME,
        .ids = acpi_nfit_ids,
        .ops = {
                .add = acpi_nfit_add,
                .remove = acpi_nfit_remove,
                .notify = acpi_nfit_notify,
        },
};
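
/*
 * The BUILD_BUG_ON() checks below pin the expected sizes of the ACPICA
 * NFIT sub-table definitions so that a mismatched header update is
 * caught at compile time.
 */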
static __init int nfit_init(void)
{
        BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

        acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
        acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
        acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
        acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
        acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
        acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
        acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
        acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
        acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
        acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
        acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
        acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);

        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
                return -ENOMEM;

        return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
        acpi_bus_unregister_driver(&acpi_nfit_driver);
        destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");