/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};
static u8 nfit_uuid[NFIT_UUID_MAX][16];
const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);
static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}
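/*
 * Illustrative only, not part of this driver: libnvdimm consumers reach
 * acpi_nfit_ctl() above through the bus descriptor, conceptually:
 *
 *	struct nd_cmd_get_config_size cfg_size;
 *
 *	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
 *			&cfg_size, sizeof(cfg_size));
 *
 * The same 'buf' carries the command's input fields on entry and receives
 * its output fields on return, which is why acpi_nfit_ctl() offsets its
 * output copy by in_buf.buffer.length.
 */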
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}
static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}
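/*
 * add_spa() above and the add_* helpers that follow all share one
 * pattern: if an identical table was seen in a previous parse it is
 * moved from the 'prev' snapshot back to the live list, otherwise a new
 * devm-managed wrapper is allocated.  This is what makes re-parsing the
 * NFIT on a _FIT notification idempotent for unchanged entries.
 */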
static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}
static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}
static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}
static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}
static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
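/*
 * The NFIT body is a packed sequence of variable-length sub-tables, so
 * add_table() consumes one sub-table per call and returns the cursor
 * advanced by hdr->length.  A caller walks the whole table with the
 * loop used by acpi_nfit_init() further down:
 *
 *	while (!IS_ERR_OR_NULL(data))
 *		data = add_table(acpi_desc, &prev, data, end);
 */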
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}

	return 0;
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}
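/*
 * nfit_mem_cmp() gives list_sort() a stable, firmware-order-independent
 * DIMM enumeration: ascending NFIT device handle, which encodes the
 * DIMM's node/socket/memory-controller/channel/slot position.
 */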
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
}
static DEVICE_ATTR_RO(revision);
static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}
static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}
static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};
static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}
static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}
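/*
 * Sizing example (assuming the 16-byte per-mapping layout above): a
 * 4-way interleave set needs sizeof(struct nfit_set_info) plus
 * 4 * 16 bytes, one u64 region_offset + u32 serial_number + u32 pad
 * per member DIMM.
 */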
static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
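/*
 * Worked example with made-up numbers: for line_size = 256,
 * num_lines = 2, table_size = 2048, a request at offset 520 gives
 * line_no = 2 with sub_line_offset = 8, then table_skip_count = 1 with
 * line_index = 0, so the translated offset is base_offset +
 * idt->line_offset[0] * 256 + 1 * 2048 + 8.
 */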
static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
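/*
 * Encoding example for the control word built above (illustrative
 * numbers): on a 64-byte cache-line system a 256-byte write at
 * dpa 0x1000 becomes (1ULL << 56) | (4ULL << 48) | 0x40 -- bit 56 is
 * the write command, bits 55:48 the length in cache lines, and
 * bits 47:0 the DPA in cache-line units.
 */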
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}
static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}
static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}
/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
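/*
 * A DIMM without the flags DSM reports -ENOTTY, and the conservative
 * fallback above assumes both workaround behaviors (control-register
 * latch and read flush) rather than risking stale block-window data.
 */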
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	data += sizeof(struct acpi_table_nfit);
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
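/*
 * Parse order matters above: acpi_nfit_init() first snapshots any
 * previously parsed tables into 'prev', re-walks the firmware-provided
 * NFIT, and only then builds nfit_mem objects, DSM masks, DIMMs, and
 * regions -- so a repeated call (e.g. from acpi_nfit_notify()) only
 * adds new entries and never deletes existing ones.
 */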
static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
		sz = buf.length;
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_table_nfit *nfit_saved;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
					__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
	ret = acpi_nfit_init(acpi_desc, buf.length);
	if (ret) {
		/* Merge failed, restore old nfit, and exit */
		acpi_desc->nfit = nfit_saved;
		dev_err(dev, "failed to merge updated NFIT\n");
	}

 out_unlock:
	device_unlock(dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};
static __init int nfit_init(void)
{
	/* the sub-table layouts below are fixed by the ACPI specification */
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");