/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

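/*
 * Passthrough for libnvdimm ioctls: look up the command descriptor and
 * DSM mask for either the bus or a dimm, wrap the caller's buffer in an
 * ACPI package, evaluate the _DSM, and copy the output payload back
 * while validating it against the envelope libnvdimm negotiated.
 */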
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

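/*
 * The add_* helpers below all follow the same pattern: if an identical
 * table was seen in the previous NFIT, move the existing entry back to
 * the live list; otherwise allocate a new devm-managed wrapper and
 * append it.  Returning false indicates an allocation failure.
 */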
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

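/*
 * Resolve the block-data-window SPA for a dimm by matching the BDW
 * range's memdev entries against this dimm's device-handle and
 * control-region index.
 */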
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}

	return 0;
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

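/* sysfs attributes for the bus, dimm, and region devices follow */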
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

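/*
 * Probe the dimm's _DSM service: start from any override mask and set
 * a bit for each dimm command the ACPI device claims to support.
 */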
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

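/*
 * The interleave-set cookie is a fletcher64 sum over the sorted
 * (region_offset, serial_number) tuples of all mappings in the set,
 * giving the region a stable identity independent of dimm enumeration
 * order.
 */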
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

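/*
 * Translate a logical aperture offset to its interleaved system-memory
 * offset: split the offset into a line number and intra-line offset,
 * then apply the per-line offset from the interleave table plus the
 * distance covered by prior passes through the whole table.
 */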
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{

	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

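/*
 * One BLK transfer: program the block-command-window for the target
 * DPA, then copy through the aperture line-by-line (honoring any
 * interleave), and finally consult the status register for errors.
 */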
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

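/*
 * SPA ranges may be shared by multiple BLK regions, so the mappings
 * below are kref-counted under spa_map_mutex and torn down when the
 * last region referencing them is disabled.
 */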
static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @nvdimm_bus: NFIT-bus that provided the spa table entry
 * @nfit_spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENODEV;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

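/*
 * _FIT notification handler: re-evaluate _FIT and merge the updated
 * table set, restoring the prior NFIT if the merge fails.
 */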
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_flush_address) != 16);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");