// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "nfit_test.h"
#include "../watermark.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                     (a)                       (b)            DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                    |
 * | cpu0 |                                    region1
 * +--+---+                            |                    |
 *    |      +-------------------------^----------^         ^
 * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |                 blk5.0             |  pm1.0  |    3      region5
 *           +-------------------------+----------+-+-------+
 *
 * +--+---+                   (Hotplug DIMM)
 *    |      +----------------------------------------------+
 * +--+---+  |                 blk6.0/pm7.0                  |    4      region6/7
 * | imc0 +--+----------------------------------------------+
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated portion of
 *    REGION0 aliases with REGION2 and REGION3.  That unallocated
 *    capacity is reclaimed as BLK namespaces ("blk2.0" and "blk3.0")
 *    starting at the base of each DIMM to offset (a) in those DIMMs.
 *    "pm0.0", "blk2.0" and "blk3.0" are free-form readable names that
 *    can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0", the rest is reclaimed in 4 BLK namespaces (one for each
 *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */

enum {
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	LABEL_SIZE = SZ_128K,
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};

struct nfit_test_dcr {
	__u8 aperature[BDW_SIZE];
};

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	| ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
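
/* NFIT device handles for the emulated DIMMs, indexed by test DIMM id */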
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};
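
/*
 * Per-dimm command failure injection: dimm_fail_cmd_flags and
 * dimm_fail_cmd_code are set through the per-dimm "fail_cmd" and
 * "fail_cmd_code" sysfs attributes and consumed by override_return_code()
 * to force the selected command functions to return an error.
 */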
static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];

static struct nfit_test_sec {
	u8 state;
	u8 ext_state;
	u8 old_state;
	u8 passphrase[32];
	u8 master_passphrase[32];
	u64 overwrite_end_time;
} dimm_sec_info[NUM_DCR];
static const struct nd_intel_smart smart_def = {
	.flags = ND_INTEL_SMART_HEALTH_VALID
		| ND_INTEL_SMART_SPARES_VALID
		| ND_INTEL_SMART_ALARM_VALID
		| ND_INTEL_SMART_USED_VALID
		| ND_INTEL_SMART_SHUTDOWN_VALID
		| ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
		| ND_INTEL_SMART_MTEMP_VALID
		| ND_INTEL_SMART_CTEMP_VALID,
	.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
	.media_temperature = 23 * 16,
	.ctrl_temperature = 25 * 16,
	.pmic_temperature = 40 * 16,
	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
		| ND_INTEL_SMART_TEMP_TRIP,
	.shutdown_count = 42,
};

struct nfit_test_fw {
	enum intel_fw_update_state state;
	u32 context;
	u64 version;
	u32 size_received;
	u64 end_time;
	bool armed;
	bool missed_activate;
	unsigned long last_activate;
};

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int dcr_idx;
	int num_dcr;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[ARRAY_SIZE(handle)];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;
	struct work_struct work;
	struct nfit_test_fw *fw;
};

static struct workqueue_struct *nfit_wq;

static struct gen_pool *nfit_pool;

static const char zero_key[NVDIMM_PASSPHRASE_LEN];

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
	nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
	nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
	nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
	nd_cmd->update_cap = 0;
	nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
	nd_cmd->run_version = 0;
	nd_cmd->updated_version = fw->version;

	return 0;
}

static int nd_intel_test_start_update(struct nfit_test *t,
		struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (fw->state != FW_STATE_NEW) {
		/* extended status, FW update in progress */
		nd_cmd->status = 0x10007;
		return 0;
	}

	fw->state = FW_STATE_IN_PROGRESS;
	fw->size_received = 0;
	nd_cmd->context = fw->context;

	dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);

	return 0;
}

static int nd_intel_test_send_data(struct nfit_test *t,
		struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];
	u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
	dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
	dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
			nd_cmd->data[nd_cmd->length-1]);

	if (fw->state != FW_STATE_IN_PROGRESS) {
		dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
		return 0;
	}

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		return 0;
	}

	/*
	 * check offset + len > size of fw storage
	 * check length is > max send length
	 */
	if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
			nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
		dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
		return 0;
	}

	fw->size_received += nd_cmd->length;
	dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
			__func__, nd_cmd->length, fw->size_received);

	return 0;
}
340 static int nd_intel_test_finish_fw(struct nfit_test
*t
,
341 struct nd_intel_fw_finish_update
*nd_cmd
,
342 unsigned int buf_len
, int idx
)
344 struct device
*dev
= &t
->pdev
.dev
;
345 struct nfit_test_fw
*fw
= &t
->fw
[idx
];
347 dev_dbg(dev
, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
348 __func__
, t
, nd_cmd
, buf_len
, idx
);
350 if (fw
->state
== FW_STATE_UPDATED
) {
351 /* update already done, need activation */
352 nd_cmd
->status
= 0x20007;
356 dev_dbg(dev
, "%s: context: %#x ctrl_flags: %#x\n",
357 __func__
, nd_cmd
->context
, nd_cmd
->ctrl_flags
);
359 switch (nd_cmd
->ctrl_flags
) {
361 if (nd_cmd
->context
!= fw
->context
) {
362 dev_dbg(dev
, "%s: incorrect context: in: %#x correct: %#x\n",
363 __func__
, nd_cmd
->context
,
365 nd_cmd
->status
= 0x10007;
369 fw
->state
= FW_STATE_VERIFY
;
370 /* set 1 second of time for firmware "update" */
371 fw
->end_time
= jiffies
+ HZ
;
375 fw
->size_received
= 0;
376 /* successfully aborted status */
377 nd_cmd
->status
= 0x40007;
378 fw
->state
= FW_STATE_NEW
;
379 dev_dbg(dev
, "%s: abort successful\n", __func__
);
382 default: /* bad control flag */
383 dev_warn(dev
, "%s: unknown control flag: %#x\n",
384 __func__
, nd_cmd
->ctrl_flags
);
391 static int nd_intel_test_finish_query(struct nfit_test
*t
,
392 struct nd_intel_fw_finish_query
*nd_cmd
,
393 unsigned int buf_len
, int idx
)
395 struct device
*dev
= &t
->pdev
.dev
;
396 struct nfit_test_fw
*fw
= &t
->fw
[idx
];
398 dev_dbg(dev
, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
399 __func__
, t
, nd_cmd
, buf_len
, idx
);
401 if (buf_len
< sizeof(*nd_cmd
))
404 if (nd_cmd
->context
!= fw
->context
) {
405 dev_dbg(dev
, "%s: incorrect context: in: %#x correct: %#x\n",
406 __func__
, nd_cmd
->context
, fw
->context
);
407 nd_cmd
->status
= 0x10007;
411 dev_dbg(dev
, "%s context: %#x\n", __func__
, nd_cmd
->context
);
415 nd_cmd
->updated_fw_rev
= 0;
417 dev_dbg(dev
, "%s: new state\n", __func__
);
420 case FW_STATE_IN_PROGRESS
:
421 /* sequencing error */
422 nd_cmd
->status
= 0x40007;
423 nd_cmd
->updated_fw_rev
= 0;
424 dev_dbg(dev
, "%s: sequence error\n", __func__
);
427 case FW_STATE_VERIFY
:
428 if (time_is_after_jiffies64(fw
->end_time
)) {
429 nd_cmd
->updated_fw_rev
= 0;
430 nd_cmd
->status
= 0x20007;
431 dev_dbg(dev
, "%s: still verifying\n", __func__
);
434 dev_dbg(dev
, "%s: transition out verify\n", __func__
);
435 fw
->state
= FW_STATE_UPDATED
;
436 fw
->missed_activate
= false;
438 case FW_STATE_UPDATED
:
440 /* bogus test version */
441 fw
->version
= nd_cmd
->updated_fw_rev
=
442 INTEL_FW_FAKE_VERSION
;
443 dev_dbg(dev
, "%s: updated\n", __func__
);
446 default: /* we should never get here */
453 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size
*nd_cmd
,
454 unsigned int buf_len
)
456 if (buf_len
< sizeof(*nd_cmd
))
460 nd_cmd
->config_size
= LABEL_SIZE
;
461 nd_cmd
->max_xfer
= SZ_4K
;
466 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
467 *nd_cmd
, unsigned int buf_len
, void *label
)
469 unsigned int len
, offset
= nd_cmd
->in_offset
;
472 if (buf_len
< sizeof(*nd_cmd
))
474 if (offset
>= LABEL_SIZE
)
476 if (nd_cmd
->in_length
+ sizeof(*nd_cmd
) > buf_len
)
480 len
= min(nd_cmd
->in_length
, LABEL_SIZE
- offset
);
481 memcpy(nd_cmd
->out_buf
, label
+ offset
, len
);
482 rc
= buf_len
- sizeof(*nd_cmd
) - len
;
487 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr
*nd_cmd
,
488 unsigned int buf_len
, void *label
)
490 unsigned int len
, offset
= nd_cmd
->in_offset
;
494 if (buf_len
< sizeof(*nd_cmd
))
496 if (offset
>= LABEL_SIZE
)
498 if (nd_cmd
->in_length
+ sizeof(*nd_cmd
) + 4 > buf_len
)
501 status
= (void *)nd_cmd
+ nd_cmd
->in_length
+ sizeof(*nd_cmd
);
503 len
= min(nd_cmd
->in_length
, LABEL_SIZE
- offset
);
504 memcpy(label
+ offset
, nd_cmd
->in_buf
, len
);
505 rc
= buf_len
- sizeof(*nd_cmd
) - (len
+ 4);
510 #define NFIT_TEST_CLEAR_ERR_UNIT 256
512 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap
*nd_cmd
,
513 unsigned int buf_len
)
517 if (buf_len
< sizeof(*nd_cmd
))
520 /* for testing, only store up to n records that fit within 4k */
521 ars_recs
= SZ_4K
/ sizeof(struct nd_ars_record
);
523 nd_cmd
->max_ars_out
= sizeof(struct nd_cmd_ars_status
)
524 + ars_recs
* sizeof(struct nd_ars_record
);
525 nd_cmd
->status
= (ND_ARS_PERSISTENT
| ND_ARS_VOLATILE
) << 16;
526 nd_cmd
->clear_err_unit
= NFIT_TEST_CLEAR_ERR_UNIT
;
531 static void post_ars_status(struct ars_state
*ars_state
,
532 struct badrange
*badrange
, u64 addr
, u64 len
)
534 struct nd_cmd_ars_status
*ars_status
;
535 struct nd_ars_record
*ars_record
;
536 struct badrange_entry
*be
;
537 u64 end
= addr
+ len
- 1;
540 ars_state
->deadline
= jiffies
+ 1*HZ
;
541 ars_status
= ars_state
->ars_status
;
542 ars_status
->status
= 0;
543 ars_status
->address
= addr
;
544 ars_status
->length
= len
;
545 ars_status
->type
= ND_ARS_PERSISTENT
;
547 spin_lock(&badrange
->lock
);
548 list_for_each_entry(be
, &badrange
->list
, list
) {
549 u64 be_end
= be
->start
+ be
->length
- 1;
552 /* skip entries outside the range */
553 if (be_end
< addr
|| be
->start
> end
)
556 rstart
= (be
->start
< addr
) ? addr
: be
->start
;
557 rend
= (be_end
< end
) ? be_end
: end
;
558 ars_record
= &ars_status
->records
[i
];
559 ars_record
->handle
= 0;
560 ars_record
->err_address
= rstart
;
561 ars_record
->length
= rend
- rstart
+ 1;
564 spin_unlock(&badrange
->lock
);
565 ars_status
->num_records
= i
;
566 ars_status
->out_length
= sizeof(struct nd_cmd_ars_status
)
567 + i
* sizeof(struct nd_ars_record
);
570 static int nfit_test_cmd_ars_start(struct nfit_test
*t
,
571 struct ars_state
*ars_state
,
572 struct nd_cmd_ars_start
*ars_start
, unsigned int buf_len
,
575 if (buf_len
< sizeof(*ars_start
))
578 spin_lock(&ars_state
->lock
);
579 if (time_before(jiffies
, ars_state
->deadline
)) {
580 ars_start
->status
= NFIT_ARS_START_BUSY
;
583 ars_start
->status
= 0;
584 ars_start
->scrub_time
= 1;
585 post_ars_status(ars_state
, &t
->badrange
, ars_start
->address
,
589 spin_unlock(&ars_state
->lock
);
594 static int nfit_test_cmd_ars_status(struct ars_state
*ars_state
,
595 struct nd_cmd_ars_status
*ars_status
, unsigned int buf_len
,
598 if (buf_len
< ars_state
->ars_status
->out_length
)
601 spin_lock(&ars_state
->lock
);
602 if (time_before(jiffies
, ars_state
->deadline
)) {
603 memset(ars_status
, 0, buf_len
);
604 ars_status
->status
= NFIT_ARS_STATUS_BUSY
;
605 ars_status
->out_length
= sizeof(*ars_status
);
608 memcpy(ars_status
, ars_state
->ars_status
,
609 ars_state
->ars_status
->out_length
);
612 spin_unlock(&ars_state
->lock
);
616 static int nfit_test_cmd_clear_error(struct nfit_test
*t
,
617 struct nd_cmd_clear_error
*clear_err
,
618 unsigned int buf_len
, int *cmd_rc
)
620 const u64 mask
= NFIT_TEST_CLEAR_ERR_UNIT
- 1;
621 if (buf_len
< sizeof(*clear_err
))
624 if ((clear_err
->address
& mask
) || (clear_err
->length
& mask
))
627 badrange_forget(&t
->badrange
, clear_err
->address
, clear_err
->length
);
628 clear_err
->status
= 0;
629 clear_err
->cleared
= clear_err
->length
;
634 struct region_search_spa
{
636 struct nd_region
*region
;
639 static int is_region_device(struct device
*dev
)
641 return !strncmp(dev
->kobj
.name
, "region", 6);
644 static int nfit_test_search_region_spa(struct device
*dev
, void *data
)
646 struct region_search_spa
*ctx
= data
;
647 struct nd_region
*nd_region
;
648 resource_size_t ndr_end
;
650 if (!is_region_device(dev
))
653 nd_region
= to_nd_region(dev
);
654 ndr_end
= nd_region
->ndr_start
+ nd_region
->ndr_size
;
656 if (ctx
->addr
>= nd_region
->ndr_start
&& ctx
->addr
< ndr_end
) {
657 ctx
->region
= nd_region
;
664 static int nfit_test_search_spa(struct nvdimm_bus
*bus
,
665 struct nd_cmd_translate_spa
*spa
)
668 struct nd_region
*nd_region
= NULL
;
669 struct nvdimm
*nvdimm
= NULL
;
670 struct nd_mapping
*nd_mapping
= NULL
;
671 struct region_search_spa ctx
= {
677 ret
= device_for_each_child(&bus
->dev
, &ctx
,
678 nfit_test_search_region_spa
);
683 nd_region
= ctx
.region
;
685 dpa
= ctx
.addr
- nd_region
->ndr_start
;
688 * last dimm is selected for test
690 nd_mapping
= &nd_region
->mapping
[nd_region
->ndr_mappings
- 1];
691 nvdimm
= nd_mapping
->nvdimm
;
693 spa
->devices
[0].nfit_device_handle
= handle
[nvdimm
->id
];
694 spa
->num_nvdimms
= 1;
695 spa
->devices
[0].dpa
= dpa
;
700 static int nfit_test_cmd_translate_spa(struct nvdimm_bus
*bus
,
701 struct nd_cmd_translate_spa
*spa
, unsigned int buf_len
)
703 if (buf_len
< spa
->translate_length
)
706 if (nfit_test_search_spa(bus
, spa
) < 0 || !spa
->num_nvdimms
)
712 static int nfit_test_cmd_smart(struct nd_intel_smart
*smart
, unsigned int buf_len
,
713 struct nd_intel_smart
*smart_data
)
715 if (buf_len
< sizeof(*smart
))
717 memcpy(smart
, smart_data
, sizeof(*smart
));
721 static int nfit_test_cmd_smart_threshold(
722 struct nd_intel_smart_threshold
*out
,
723 unsigned int buf_len
,
724 struct nd_intel_smart_threshold
*smart_t
)
726 if (buf_len
< sizeof(*smart_t
))
728 memcpy(out
, smart_t
, sizeof(*smart_t
));
732 static void smart_notify(struct device
*bus_dev
,
733 struct device
*dimm_dev
, struct nd_intel_smart
*smart
,
734 struct nd_intel_smart_threshold
*thresh
)
736 dev_dbg(dimm_dev
, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
737 __func__
, thresh
->alarm_control
, thresh
->spares
,
738 smart
->spares
, thresh
->media_temperature
,
739 smart
->media_temperature
, thresh
->ctrl_temperature
,
740 smart
->ctrl_temperature
);
741 if (((thresh
->alarm_control
& ND_INTEL_SMART_SPARE_TRIP
)
744 || ((thresh
->alarm_control
& ND_INTEL_SMART_TEMP_TRIP
)
745 && smart
->media_temperature
746 >= thresh
->media_temperature
)
747 || ((thresh
->alarm_control
& ND_INTEL_SMART_CTEMP_TRIP
)
748 && smart
->ctrl_temperature
749 >= thresh
->ctrl_temperature
)
750 || (smart
->health
!= ND_INTEL_SMART_NON_CRITICAL_HEALTH
)
751 || (smart
->shutdown_state
!= 0)) {
752 device_lock(bus_dev
);
753 __acpi_nvdimm_notify(dimm_dev
, 0x81);
754 device_unlock(bus_dev
);
758 static int nfit_test_cmd_smart_set_threshold(
759 struct nd_intel_smart_set_threshold
*in
,
760 unsigned int buf_len
,
761 struct nd_intel_smart_threshold
*thresh
,
762 struct nd_intel_smart
*smart
,
763 struct device
*bus_dev
, struct device
*dimm_dev
)
767 size
= sizeof(*in
) - 4;
770 memcpy(thresh
->data
, in
, size
);
772 smart_notify(bus_dev
, dimm_dev
, smart
, thresh
);
777 static int nfit_test_cmd_smart_inject(
778 struct nd_intel_smart_inject
*inj
,
779 unsigned int buf_len
,
780 struct nd_intel_smart_threshold
*thresh
,
781 struct nd_intel_smart
*smart
,
782 struct device
*bus_dev
, struct device
*dimm_dev
)
784 if (buf_len
!= sizeof(*inj
))
787 if (inj
->flags
& ND_INTEL_SMART_INJECT_MTEMP
) {
788 if (inj
->mtemp_enable
)
789 smart
->media_temperature
= inj
->media_temperature
;
791 smart
->media_temperature
= smart_def
.media_temperature
;
793 if (inj
->flags
& ND_INTEL_SMART_INJECT_SPARE
) {
794 if (inj
->spare_enable
)
795 smart
->spares
= inj
->spares
;
797 smart
->spares
= smart_def
.spares
;
799 if (inj
->flags
& ND_INTEL_SMART_INJECT_FATAL
) {
800 if (inj
->fatal_enable
)
801 smart
->health
= ND_INTEL_SMART_FATAL_HEALTH
;
803 smart
->health
= ND_INTEL_SMART_NON_CRITICAL_HEALTH
;
805 if (inj
->flags
& ND_INTEL_SMART_INJECT_SHUTDOWN
) {
806 if (inj
->unsafe_shutdown_enable
) {
807 smart
->shutdown_state
= 1;
808 smart
->shutdown_count
++;
810 smart
->shutdown_state
= 0;
813 smart_notify(bus_dev
, dimm_dev
, smart
, thresh
);
818 static void uc_error_notify(struct work_struct
*work
)
820 struct nfit_test
*t
= container_of(work
, typeof(*t
), work
);
822 __acpi_nfit_notify(&t
->pdev
.dev
, t
, NFIT_NOTIFY_UC_MEMORY_ERROR
);
825 static int nfit_test_cmd_ars_error_inject(struct nfit_test
*t
,
826 struct nd_cmd_ars_err_inj
*err_inj
, unsigned int buf_len
)
830 if (buf_len
!= sizeof(*err_inj
)) {
835 if (err_inj
->err_inj_spa_range_length
<= 0) {
840 rc
= badrange_add(&t
->badrange
, err_inj
->err_inj_spa_range_base
,
841 err_inj
->err_inj_spa_range_length
);
845 if (err_inj
->err_inj_options
& (1 << ND_ARS_ERR_INJ_OPT_NOTIFY
))
846 queue_work(nfit_wq
, &t
->work
);
852 err_inj
->status
= NFIT_ARS_INJECT_INVALID
;
856 static int nfit_test_cmd_ars_inject_clear(struct nfit_test
*t
,
857 struct nd_cmd_ars_err_inj_clr
*err_clr
, unsigned int buf_len
)
861 if (buf_len
!= sizeof(*err_clr
)) {
866 if (err_clr
->err_inj_clr_spa_range_length
<= 0) {
871 badrange_forget(&t
->badrange
, err_clr
->err_inj_clr_spa_range_base
,
872 err_clr
->err_inj_clr_spa_range_length
);
878 err_clr
->status
= NFIT_ARS_INJECT_INVALID
;
882 static int nfit_test_cmd_ars_inject_status(struct nfit_test
*t
,
883 struct nd_cmd_ars_err_inj_stat
*err_stat
,
884 unsigned int buf_len
)
886 struct badrange_entry
*be
;
887 int max
= SZ_4K
/ sizeof(struct nd_error_stat_query_record
);
890 err_stat
->status
= 0;
891 spin_lock(&t
->badrange
.lock
);
892 list_for_each_entry(be
, &t
->badrange
.list
, list
) {
893 err_stat
->record
[i
].err_inj_stat_spa_range_base
= be
->start
;
894 err_stat
->record
[i
].err_inj_stat_spa_range_length
= be
->length
;
899 spin_unlock(&t
->badrange
.lock
);
900 err_stat
->inj_err_rec_count
= i
;
905 static int nd_intel_test_cmd_set_lss_status(struct nfit_test
*t
,
906 struct nd_intel_lss
*nd_cmd
, unsigned int buf_len
)
908 struct device
*dev
= &t
->pdev
.dev
;
910 if (buf_len
< sizeof(*nd_cmd
))
913 switch (nd_cmd
->enable
) {
916 dev_dbg(dev
, "%s: Latch System Shutdown Status disabled\n",
921 dev_dbg(dev
, "%s: Latch System Shutdown Status enabled\n",
925 dev_warn(dev
, "Unknown enable value: %#x\n", nd_cmd
->enable
);
926 nd_cmd
->status
= 0x3;
934 static int override_return_code(int dimm
, unsigned int func
, int rc
)
936 if ((1 << func
) & dimm_fail_cmd_flags
[dimm
]) {
937 if (dimm_fail_cmd_code
[dimm
])
938 return dimm_fail_cmd_code
[dimm
];
944 static int nd_intel_test_cmd_security_status(struct nfit_test
*t
,
945 struct nd_intel_get_security_state
*nd_cmd
,
946 unsigned int buf_len
, int dimm
)
948 struct device
*dev
= &t
->pdev
.dev
;
949 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
952 nd_cmd
->state
= sec
->state
;
953 nd_cmd
->extended_state
= sec
->ext_state
;
954 dev_dbg(dev
, "security state (%#x) returned\n", nd_cmd
->state
);
959 static int nd_intel_test_cmd_unlock_unit(struct nfit_test
*t
,
960 struct nd_intel_unlock_unit
*nd_cmd
,
961 unsigned int buf_len
, int dimm
)
963 struct device
*dev
= &t
->pdev
.dev
;
964 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
966 if (!(sec
->state
& ND_INTEL_SEC_STATE_LOCKED
) ||
967 (sec
->state
& ND_INTEL_SEC_STATE_FROZEN
)) {
968 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
969 dev_dbg(dev
, "unlock unit: invalid state: %#x\n",
971 } else if (memcmp(nd_cmd
->passphrase
, sec
->passphrase
,
972 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
973 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
974 dev_dbg(dev
, "unlock unit: invalid passphrase\n");
977 sec
->state
= ND_INTEL_SEC_STATE_ENABLED
;
978 dev_dbg(dev
, "Unit unlocked\n");
981 dev_dbg(dev
, "unlocking status returned: %#x\n", nd_cmd
->status
);
985 static int nd_intel_test_cmd_set_pass(struct nfit_test
*t
,
986 struct nd_intel_set_passphrase
*nd_cmd
,
987 unsigned int buf_len
, int dimm
)
989 struct device
*dev
= &t
->pdev
.dev
;
990 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
992 if (sec
->state
& ND_INTEL_SEC_STATE_FROZEN
) {
993 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
994 dev_dbg(dev
, "set passphrase: wrong security state\n");
995 } else if (memcmp(nd_cmd
->old_pass
, sec
->passphrase
,
996 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
997 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
998 dev_dbg(dev
, "set passphrase: wrong passphrase\n");
1000 memcpy(sec
->passphrase
, nd_cmd
->new_pass
,
1001 ND_INTEL_PASSPHRASE_SIZE
);
1002 sec
->state
|= ND_INTEL_SEC_STATE_ENABLED
;
1004 dev_dbg(dev
, "passphrase updated\n");
1010 static int nd_intel_test_cmd_freeze_lock(struct nfit_test
*t
,
1011 struct nd_intel_freeze_lock
*nd_cmd
,
1012 unsigned int buf_len
, int dimm
)
1014 struct device
*dev
= &t
->pdev
.dev
;
1015 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1017 if (!(sec
->state
& ND_INTEL_SEC_STATE_ENABLED
)) {
1018 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
1019 dev_dbg(dev
, "freeze lock: wrong security state\n");
1021 sec
->state
|= ND_INTEL_SEC_STATE_FROZEN
;
1023 dev_dbg(dev
, "security frozen\n");
1029 static int nd_intel_test_cmd_disable_pass(struct nfit_test
*t
,
1030 struct nd_intel_disable_passphrase
*nd_cmd
,
1031 unsigned int buf_len
, int dimm
)
1033 struct device
*dev
= &t
->pdev
.dev
;
1034 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1036 if (!(sec
->state
& ND_INTEL_SEC_STATE_ENABLED
) ||
1037 (sec
->state
& ND_INTEL_SEC_STATE_FROZEN
)) {
1038 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
1039 dev_dbg(dev
, "disable passphrase: wrong security state\n");
1040 } else if (memcmp(nd_cmd
->passphrase
, sec
->passphrase
,
1041 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
1042 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
1043 dev_dbg(dev
, "disable passphrase: wrong passphrase\n");
1045 memset(sec
->passphrase
, 0, ND_INTEL_PASSPHRASE_SIZE
);
1047 dev_dbg(dev
, "disable passphrase: done\n");
1053 static int nd_intel_test_cmd_secure_erase(struct nfit_test
*t
,
1054 struct nd_intel_secure_erase
*nd_cmd
,
1055 unsigned int buf_len
, int dimm
)
1057 struct device
*dev
= &t
->pdev
.dev
;
1058 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1060 if (sec
->state
& ND_INTEL_SEC_STATE_FROZEN
) {
1061 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
1062 dev_dbg(dev
, "secure erase: wrong security state\n");
1063 } else if (memcmp(nd_cmd
->passphrase
, sec
->passphrase
,
1064 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
1065 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
1066 dev_dbg(dev
, "secure erase: wrong passphrase\n");
1068 if (!(sec
->state
& ND_INTEL_SEC_STATE_ENABLED
)
1069 && (memcmp(nd_cmd
->passphrase
, zero_key
,
1070 ND_INTEL_PASSPHRASE_SIZE
) != 0)) {
1071 dev_dbg(dev
, "invalid zero key\n");
1074 memset(sec
->passphrase
, 0, ND_INTEL_PASSPHRASE_SIZE
);
1075 memset(sec
->master_passphrase
, 0, ND_INTEL_PASSPHRASE_SIZE
);
1077 sec
->ext_state
= ND_INTEL_SEC_ESTATE_ENABLED
;
1078 dev_dbg(dev
, "secure erase: done\n");
1084 static int nd_intel_test_cmd_overwrite(struct nfit_test
*t
,
1085 struct nd_intel_overwrite
*nd_cmd
,
1086 unsigned int buf_len
, int dimm
)
1088 struct device
*dev
= &t
->pdev
.dev
;
1089 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1091 if ((sec
->state
& ND_INTEL_SEC_STATE_ENABLED
) &&
1092 memcmp(nd_cmd
->passphrase
, sec
->passphrase
,
1093 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
1094 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
1095 dev_dbg(dev
, "overwrite: wrong passphrase\n");
1099 sec
->old_state
= sec
->state
;
1100 sec
->state
= ND_INTEL_SEC_STATE_OVERWRITE
;
1101 dev_dbg(dev
, "overwrite progressing.\n");
1102 sec
->overwrite_end_time
= get_jiffies_64() + 5 * HZ
;
1107 static int nd_intel_test_cmd_query_overwrite(struct nfit_test
*t
,
1108 struct nd_intel_query_overwrite
*nd_cmd
,
1109 unsigned int buf_len
, int dimm
)
1111 struct device
*dev
= &t
->pdev
.dev
;
1112 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1114 if (!(sec
->state
& ND_INTEL_SEC_STATE_OVERWRITE
)) {
1115 nd_cmd
->status
= ND_INTEL_STATUS_OQUERY_SEQUENCE_ERR
;
1119 if (time_is_before_jiffies64(sec
->overwrite_end_time
)) {
1120 sec
->overwrite_end_time
= 0;
1121 sec
->state
= sec
->old_state
;
1123 sec
->ext_state
= ND_INTEL_SEC_ESTATE_ENABLED
;
1124 dev_dbg(dev
, "overwrite is complete\n");
1126 nd_cmd
->status
= ND_INTEL_STATUS_OQUERY_INPROGRESS
;
1130 static int nd_intel_test_cmd_master_set_pass(struct nfit_test
*t
,
1131 struct nd_intel_set_master_passphrase
*nd_cmd
,
1132 unsigned int buf_len
, int dimm
)
1134 struct device
*dev
= &t
->pdev
.dev
;
1135 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1137 if (!(sec
->ext_state
& ND_INTEL_SEC_ESTATE_ENABLED
)) {
1138 nd_cmd
->status
= ND_INTEL_STATUS_NOT_SUPPORTED
;
1139 dev_dbg(dev
, "master set passphrase: in wrong state\n");
1140 } else if (sec
->ext_state
& ND_INTEL_SEC_ESTATE_PLIMIT
) {
1141 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
1142 dev_dbg(dev
, "master set passphrase: in wrong security state\n");
1143 } else if (memcmp(nd_cmd
->old_pass
, sec
->master_passphrase
,
1144 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
1145 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
1146 dev_dbg(dev
, "master set passphrase: wrong passphrase\n");
1148 memcpy(sec
->master_passphrase
, nd_cmd
->new_pass
,
1149 ND_INTEL_PASSPHRASE_SIZE
);
1150 sec
->ext_state
= ND_INTEL_SEC_ESTATE_ENABLED
;
1151 dev_dbg(dev
, "master passphrase: updated\n");
1157 static int nd_intel_test_cmd_master_secure_erase(struct nfit_test
*t
,
1158 struct nd_intel_master_secure_erase
*nd_cmd
,
1159 unsigned int buf_len
, int dimm
)
1161 struct device
*dev
= &t
->pdev
.dev
;
1162 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1164 if (!(sec
->ext_state
& ND_INTEL_SEC_ESTATE_ENABLED
)) {
1165 nd_cmd
->status
= ND_INTEL_STATUS_NOT_SUPPORTED
;
1166 dev_dbg(dev
, "master secure erase: in wrong state\n");
1167 } else if (sec
->ext_state
& ND_INTEL_SEC_ESTATE_PLIMIT
) {
1168 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_STATE
;
1169 dev_dbg(dev
, "master secure erase: in wrong security state\n");
1170 } else if (memcmp(nd_cmd
->passphrase
, sec
->master_passphrase
,
1171 ND_INTEL_PASSPHRASE_SIZE
) != 0) {
1172 nd_cmd
->status
= ND_INTEL_STATUS_INVALID_PASS
;
1173 dev_dbg(dev
, "master secure erase: wrong passphrase\n");
1175 /* we do not erase master state passphrase ever */
1176 sec
->ext_state
= ND_INTEL_SEC_ESTATE_ENABLED
;
1177 memset(sec
->passphrase
, 0, ND_INTEL_PASSPHRASE_SIZE
);
1179 dev_dbg(dev
, "master secure erase: done\n");
1185 static unsigned long last_activate
;
1187 static int nvdimm_bus_intel_fw_activate_businfo(struct nfit_test
*t
,
1188 struct nd_intel_bus_fw_activate_businfo
*nd_cmd
,
1189 unsigned int buf_len
)
1195 for (i
= 0; i
< NUM_DCR
; i
++) {
1196 struct nfit_test_fw
*fw
= &t
->fw
[i
];
1203 * Emulate 3 second activation max, and 1 second incremental
1204 * quiesce time per dimm requiring multiple activates to get all
1208 state
= ND_INTEL_FWA_ARMED
;
1209 else if (!last_activate
|| time_after(jiffies
, last_activate
+ 3 * HZ
))
1210 state
= ND_INTEL_FWA_IDLE
;
1212 state
= ND_INTEL_FWA_BUSY
;
1214 tmo
= armed
* USEC_PER_SEC
;
1215 *nd_cmd
= (struct nd_intel_bus_fw_activate_businfo
) {
1216 .capability
= ND_INTEL_BUS_FWA_CAP_FWQUIESCE
1217 | ND_INTEL_BUS_FWA_CAP_OSQUIESCE
1218 | ND_INTEL_BUS_FWA_CAP_RESET
,
1220 .activate_tmo
= tmo
,
1221 .cpu_quiesce_tmo
= tmo
,
1222 .io_quiesce_tmo
= tmo
,
1223 .max_quiesce_tmo
= 3 * USEC_PER_SEC
,
1229 static int nvdimm_bus_intel_fw_activate(struct nfit_test
*t
,
1230 struct nd_intel_bus_fw_activate
*nd_cmd
,
1231 unsigned int buf_len
)
1233 struct nd_intel_bus_fw_activate_businfo info
;
1237 nvdimm_bus_intel_fw_activate_businfo(t
, &info
, sizeof(info
));
1238 if (info
.state
== ND_INTEL_FWA_BUSY
)
1239 status
= ND_INTEL_BUS_FWA_STATUS_BUSY
;
1240 else if (info
.activate_tmo
> info
.max_quiesce_tmo
)
1241 status
= ND_INTEL_BUS_FWA_STATUS_TMO
;
1242 else if (info
.state
== ND_INTEL_FWA_IDLE
)
1243 status
= ND_INTEL_BUS_FWA_STATUS_NOARM
;
1245 dev_dbg(&t
->pdev
.dev
, "status: %d\n", status
);
1246 nd_cmd
->status
= status
;
1247 if (status
&& status
!= ND_INTEL_BUS_FWA_STATUS_TMO
)
1250 last_activate
= jiffies
;
1251 for (i
= 0; i
< NUM_DCR
; i
++) {
1252 struct nfit_test_fw
*fw
= &t
->fw
[i
];
1256 if (fw
->state
!= FW_STATE_UPDATED
)
1257 fw
->missed_activate
= true;
1259 fw
->state
= FW_STATE_NEW
;
1261 fw
->last_activate
= last_activate
;
1267 static int nd_intel_test_cmd_fw_activate_dimminfo(struct nfit_test
*t
,
1268 struct nd_intel_fw_activate_dimminfo
*nd_cmd
,
1269 unsigned int buf_len
, int dimm
)
1271 struct nd_intel_bus_fw_activate_businfo info
;
1272 struct nfit_test_fw
*fw
= &t
->fw
[dimm
];
1275 nvdimm_bus_intel_fw_activate_businfo(t
, &info
, sizeof(info
));
1277 if (info
.state
== ND_INTEL_FWA_BUSY
)
1278 state
= ND_INTEL_FWA_BUSY
;
1279 else if (info
.state
== ND_INTEL_FWA_IDLE
)
1280 state
= ND_INTEL_FWA_IDLE
;
1282 state
= ND_INTEL_FWA_ARMED
;
1284 state
= ND_INTEL_FWA_IDLE
;
1286 result
= ND_INTEL_DIMM_FWA_NONE
;
1287 if (last_activate
&& fw
->last_activate
== last_activate
&&
1288 state
== ND_INTEL_FWA_IDLE
) {
1289 if (fw
->missed_activate
)
1290 result
= ND_INTEL_DIMM_FWA_NOTSTAGED
;
1292 result
= ND_INTEL_DIMM_FWA_SUCCESS
;
1295 *nd_cmd
= (struct nd_intel_fw_activate_dimminfo
) {
1303 static int nd_intel_test_cmd_fw_activate_arm(struct nfit_test
*t
,
1304 struct nd_intel_fw_activate_arm
*nd_cmd
,
1305 unsigned int buf_len
, int dimm
)
1307 struct nfit_test_fw
*fw
= &t
->fw
[dimm
];
1309 fw
->armed
= nd_cmd
->activate_arm
== ND_INTEL_DIMM_FWA_ARM
;
1314 static int get_dimm(struct nfit_mem
*nfit_mem
, unsigned int func
)
1318 /* lookup per-dimm data */
1319 for (i
= 0; i
< ARRAY_SIZE(handle
); i
++)
1320 if (__to_nfit_memdev(nfit_mem
)->device_handle
== handle
[i
])
1322 if (i
>= ARRAY_SIZE(handle
))
1327 static void nfit_ctl_dbg(struct acpi_nfit_desc
*acpi_desc
,
1328 struct nvdimm
*nvdimm
, unsigned int cmd
, void *buf
,
1331 struct nfit_test
*t
= container_of(acpi_desc
, typeof(*t
), acpi_desc
);
1332 unsigned int func
= cmd
;
1333 unsigned int family
= 0;
1335 if (cmd
== ND_CMD_CALL
) {
1336 struct nd_cmd_pkg
*pkg
= buf
;
1338 len
= pkg
->nd_size_in
;
1339 family
= pkg
->nd_family
;
1340 buf
= pkg
->nd_payload
;
1341 func
= pkg
->nd_command
;
1343 dev_dbg(&t
->pdev
.dev
, "%s family: %d cmd: %d: func: %d input length: %d\n",
1344 nvdimm
? nvdimm_name(nvdimm
) : "bus", family
, cmd
, func
,
1346 print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET
, 16, 4,
1347 buf
, min(len
, 256u), true);
1350 static int nfit_test_ctl(struct nvdimm_bus_descriptor
*nd_desc
,
1351 struct nvdimm
*nvdimm
, unsigned int cmd
, void *buf
,
1352 unsigned int buf_len
, int *cmd_rc
)
1354 struct acpi_nfit_desc
*acpi_desc
= to_acpi_desc(nd_desc
);
1355 struct nfit_test
*t
= container_of(acpi_desc
, typeof(*t
), acpi_desc
);
1356 unsigned int func
= cmd
;
1357 int i
, rc
= 0, __cmd_rc
;
1363 nfit_ctl_dbg(acpi_desc
, nvdimm
, cmd
, buf
, buf_len
);
1366 struct nfit_mem
*nfit_mem
= nvdimm_provider_data(nvdimm
);
1367 unsigned long cmd_mask
= nvdimm_cmd_mask(nvdimm
);
1372 if (cmd
== ND_CMD_CALL
) {
1373 struct nd_cmd_pkg
*call_pkg
= buf
;
1375 buf_len
= call_pkg
->nd_size_in
+ call_pkg
->nd_size_out
;
1376 buf
= (void *) call_pkg
->nd_payload
;
1377 func
= call_pkg
->nd_command
;
1378 if (call_pkg
->nd_family
!= nfit_mem
->family
)
1381 i
= get_dimm(nfit_mem
, func
);
1385 dev_WARN_ONCE(&t
->pdev
.dev
, 1,
1386 "ND_CMD_CALL only valid for nfit_test0\n");
1391 case NVDIMM_INTEL_GET_SECURITY_STATE
:
1392 rc
= nd_intel_test_cmd_security_status(t
,
1395 case NVDIMM_INTEL_UNLOCK_UNIT
:
1396 rc
= nd_intel_test_cmd_unlock_unit(t
,
1399 case NVDIMM_INTEL_SET_PASSPHRASE
:
1400 rc
= nd_intel_test_cmd_set_pass(t
,
1403 case NVDIMM_INTEL_DISABLE_PASSPHRASE
:
1404 rc
= nd_intel_test_cmd_disable_pass(t
,
1407 case NVDIMM_INTEL_FREEZE_LOCK
:
1408 rc
= nd_intel_test_cmd_freeze_lock(t
,
1411 case NVDIMM_INTEL_SECURE_ERASE
:
1412 rc
= nd_intel_test_cmd_secure_erase(t
,
1415 case NVDIMM_INTEL_OVERWRITE
:
1416 rc
= nd_intel_test_cmd_overwrite(t
,
1419 case NVDIMM_INTEL_QUERY_OVERWRITE
:
1420 rc
= nd_intel_test_cmd_query_overwrite(t
,
1423 case NVDIMM_INTEL_SET_MASTER_PASSPHRASE
:
1424 rc
= nd_intel_test_cmd_master_set_pass(t
,
1427 case NVDIMM_INTEL_MASTER_SECURE_ERASE
:
1428 rc
= nd_intel_test_cmd_master_secure_erase(t
,
1431 case NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO
:
1432 rc
= nd_intel_test_cmd_fw_activate_dimminfo(
1433 t
, buf
, buf_len
, i
);
1435 case NVDIMM_INTEL_FW_ACTIVATE_ARM
:
1436 rc
= nd_intel_test_cmd_fw_activate_arm(
1437 t
, buf
, buf_len
, i
);
1439 case ND_INTEL_ENABLE_LSS_STATUS
:
1440 rc
= nd_intel_test_cmd_set_lss_status(t
,
1443 case ND_INTEL_FW_GET_INFO
:
1444 rc
= nd_intel_test_get_fw_info(t
, buf
,
1447 case ND_INTEL_FW_START_UPDATE
:
1448 rc
= nd_intel_test_start_update(t
, buf
,
1451 case ND_INTEL_FW_SEND_DATA
:
1452 rc
= nd_intel_test_send_data(t
, buf
,
1455 case ND_INTEL_FW_FINISH_UPDATE
:
1456 rc
= nd_intel_test_finish_fw(t
, buf
,
1459 case ND_INTEL_FW_FINISH_QUERY
:
1460 rc
= nd_intel_test_finish_query(t
, buf
,
1463 case ND_INTEL_SMART
:
1464 rc
= nfit_test_cmd_smart(buf
, buf_len
,
1467 case ND_INTEL_SMART_THRESHOLD
:
1468 rc
= nfit_test_cmd_smart_threshold(buf
,
1470 &t
->smart_threshold
[i
]);
1472 case ND_INTEL_SMART_SET_THRESHOLD
:
1473 rc
= nfit_test_cmd_smart_set_threshold(buf
,
1475 &t
->smart_threshold
[i
],
1477 &t
->pdev
.dev
, t
->dimm_dev
[i
]);
1479 case ND_INTEL_SMART_INJECT
:
1480 rc
= nfit_test_cmd_smart_inject(buf
,
1482 &t
->smart_threshold
[i
],
1484 &t
->pdev
.dev
, t
->dimm_dev
[i
]);
1489 return override_return_code(i
, func
, rc
);
1492 if (!test_bit(cmd
, &cmd_mask
)
1493 || !test_bit(func
, &nfit_mem
->dsm_mask
))
1496 i
= get_dimm(nfit_mem
, func
);
1501 case ND_CMD_GET_CONFIG_SIZE
:
1502 rc
= nfit_test_cmd_get_config_size(buf
, buf_len
);
1504 case ND_CMD_GET_CONFIG_DATA
:
1505 rc
= nfit_test_cmd_get_config_data(buf
, buf_len
,
1506 t
->label
[i
- t
->dcr_idx
]);
1508 case ND_CMD_SET_CONFIG_DATA
:
1509 rc
= nfit_test_cmd_set_config_data(buf
, buf_len
,
1510 t
->label
[i
- t
->dcr_idx
]);
1515 return override_return_code(i
, func
, rc
);
1517 struct ars_state
*ars_state
= &t
->ars_state
;
1518 struct nd_cmd_pkg
*call_pkg
= buf
;
1523 if (cmd
== ND_CMD_CALL
&& call_pkg
->nd_family
1524 == NVDIMM_BUS_FAMILY_NFIT
) {
1525 func
= call_pkg
->nd_command
;
1526 buf_len
= call_pkg
->nd_size_in
+ call_pkg
->nd_size_out
;
1527 buf
= (void *) call_pkg
->nd_payload
;
1530 case NFIT_CMD_TRANSLATE_SPA
:
1531 rc
= nfit_test_cmd_translate_spa(
1532 acpi_desc
->nvdimm_bus
, buf
, buf_len
);
1534 case NFIT_CMD_ARS_INJECT_SET
:
1535 rc
= nfit_test_cmd_ars_error_inject(t
, buf
,
1538 case NFIT_CMD_ARS_INJECT_CLEAR
:
1539 rc
= nfit_test_cmd_ars_inject_clear(t
, buf
,
1542 case NFIT_CMD_ARS_INJECT_GET
:
1543 rc
= nfit_test_cmd_ars_inject_status(t
, buf
,
1549 } else if (cmd
== ND_CMD_CALL
&& call_pkg
->nd_family
1550 == NVDIMM_BUS_FAMILY_INTEL
) {
1551 func
= call_pkg
->nd_command
;
1552 buf_len
= call_pkg
->nd_size_in
+ call_pkg
->nd_size_out
;
1553 buf
= (void *) call_pkg
->nd_payload
;
1556 case NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO
:
1557 rc
= nvdimm_bus_intel_fw_activate_businfo(t
,
1560 case NVDIMM_BUS_INTEL_FW_ACTIVATE
:
1561 rc
= nvdimm_bus_intel_fw_activate(t
, buf
,
1567 } else if (cmd
== ND_CMD_CALL
)
1570 if (!nd_desc
|| !test_bit(cmd
, &nd_desc
->cmd_mask
))
1574 case ND_CMD_ARS_CAP
:
1575 rc
= nfit_test_cmd_ars_cap(buf
, buf_len
);
1577 case ND_CMD_ARS_START
:
1578 rc
= nfit_test_cmd_ars_start(t
, ars_state
, buf
,
1581 case ND_CMD_ARS_STATUS
:
1582 rc
= nfit_test_cmd_ars_status(ars_state
, buf
, buf_len
,
1585 case ND_CMD_CLEAR_ERROR
:
1586 rc
= nfit_test_cmd_clear_error(t
, buf
, buf_len
, cmd_rc
);
1596 static DEFINE_SPINLOCK(nfit_test_lock
);
1597 static struct nfit_test
*instances
[NUM_NFITS
];
1599 static void release_nfit_res(void *data
)
1601 struct nfit_test_resource
*nfit_res
= data
;
1603 spin_lock(&nfit_test_lock
);
1604 list_del(&nfit_res
->list
);
1605 spin_unlock(&nfit_test_lock
);
1607 if (resource_size(&nfit_res
->res
) >= DIMM_SIZE
)
1608 gen_pool_free(nfit_pool
, nfit_res
->res
.start
,
1609 resource_size(&nfit_res
->res
));
1610 vfree(nfit_res
->buf
);
1614 static void *__test_alloc(struct nfit_test
*t
, size_t size
, dma_addr_t
*dma
,
1617 struct device
*dev
= &t
->pdev
.dev
;
1618 struct nfit_test_resource
*nfit_res
= kzalloc(sizeof(*nfit_res
),
1622 if (!buf
|| !nfit_res
|| !*dma
)
1624 rc
= devm_add_action(dev
, release_nfit_res
, nfit_res
);
1627 INIT_LIST_HEAD(&nfit_res
->list
);
1628 memset(buf
, 0, size
);
1629 nfit_res
->dev
= dev
;
1630 nfit_res
->buf
= buf
;
1631 nfit_res
->res
.start
= *dma
;
1632 nfit_res
->res
.end
= *dma
+ size
- 1;
1633 nfit_res
->res
.name
= "NFIT";
1634 spin_lock_init(&nfit_res
->lock
);
1635 INIT_LIST_HEAD(&nfit_res
->requests
);
1636 spin_lock(&nfit_test_lock
);
1637 list_add(&nfit_res
->list
, &t
->resources
);
1638 spin_unlock(&nfit_test_lock
);
1640 return nfit_res
->buf
;
1642 if (*dma
&& size
>= DIMM_SIZE
)
1643 gen_pool_free(nfit_pool
, *dma
, size
);
1650 static void *test_alloc(struct nfit_test
*t
, size_t size
, dma_addr_t
*dma
)
1652 struct genpool_data_align data
= {
1655 void *buf
= vmalloc(size
);
1657 if (size
>= DIMM_SIZE
)
1658 *dma
= gen_pool_alloc_algo(nfit_pool
, size
,
1659 gen_pool_first_fit_align
, &data
);
1661 *dma
= (unsigned long) buf
;
1662 return __test_alloc(t
, size
, dma
, buf
);
1665 static struct nfit_test_resource
*nfit_test_lookup(resource_size_t addr
)
1669 for (i
= 0; i
< ARRAY_SIZE(instances
); i
++) {
1670 struct nfit_test_resource
*n
, *nfit_res
= NULL
;
1671 struct nfit_test
*t
= instances
[i
];
1675 spin_lock(&nfit_test_lock
);
1676 list_for_each_entry(n
, &t
->resources
, list
) {
1677 if (addr
>= n
->res
.start
&& (addr
< n
->res
.start
1678 + resource_size(&n
->res
))) {
1681 } else if (addr
>= (unsigned long) n
->buf
1682 && (addr
< (unsigned long) n
->buf
1683 + resource_size(&n
->res
))) {
1688 spin_unlock(&nfit_test_lock
);
1696 static int ars_state_init(struct device
*dev
, struct ars_state
*ars_state
)
1698 /* for testing, only store up to n records that fit within 4k */
1699 ars_state
->ars_status
= devm_kzalloc(dev
,
1700 sizeof(struct nd_cmd_ars_status
) + SZ_4K
, GFP_KERNEL
);
1701 if (!ars_state
->ars_status
)
1703 spin_lock_init(&ars_state
->lock
);
1707 static void put_dimms(void *data
)
1709 struct nfit_test
*t
= data
;
1712 for (i
= 0; i
< t
->num_dcr
; i
++)
1714 device_unregister(t
->dimm_dev
[i
]);
1717 static struct class *nfit_test_dimm
;
1719 static int dimm_name_to_id(struct device
*dev
)
1723 if (sscanf(dev_name(dev
), "test_dimm%d", &dimm
) != 1)
1728 static ssize_t
handle_show(struct device
*dev
, struct device_attribute
*attr
,
1731 int dimm
= dimm_name_to_id(dev
);
1736 return sprintf(buf
, "%#x\n", handle
[dimm
]);
1738 DEVICE_ATTR_RO(handle
);
1740 static ssize_t
fail_cmd_show(struct device
*dev
, struct device_attribute
*attr
,
1743 int dimm
= dimm_name_to_id(dev
);
1748 return sprintf(buf
, "%#lx\n", dimm_fail_cmd_flags
[dimm
]);
1751 static ssize_t
fail_cmd_store(struct device
*dev
, struct device_attribute
*attr
,
1752 const char *buf
, size_t size
)
1754 int dimm
= dimm_name_to_id(dev
);
1761 rc
= kstrtol(buf
, 0, &val
);
1765 dimm_fail_cmd_flags
[dimm
] = val
;
1768 static DEVICE_ATTR_RW(fail_cmd
);
1770 static ssize_t
fail_cmd_code_show(struct device
*dev
, struct device_attribute
*attr
,
1773 int dimm
= dimm_name_to_id(dev
);
1778 return sprintf(buf
, "%d\n", dimm_fail_cmd_code
[dimm
]);
1781 static ssize_t
fail_cmd_code_store(struct device
*dev
, struct device_attribute
*attr
,
1782 const char *buf
, size_t size
)
1784 int dimm
= dimm_name_to_id(dev
);
1791 rc
= kstrtol(buf
, 0, &val
);
1795 dimm_fail_cmd_code
[dimm
] = val
;
1798 static DEVICE_ATTR_RW(fail_cmd_code
);
1800 static ssize_t
lock_dimm_store(struct device
*dev
,
1801 struct device_attribute
*attr
, const char *buf
, size_t size
)
1803 int dimm
= dimm_name_to_id(dev
);
1804 struct nfit_test_sec
*sec
= &dimm_sec_info
[dimm
];
1806 sec
->state
= ND_INTEL_SEC_STATE_ENABLED
| ND_INTEL_SEC_STATE_LOCKED
;
1809 static DEVICE_ATTR_WO(lock_dimm
);
1811 static struct attribute
*nfit_test_dimm_attributes
[] = {
1812 &dev_attr_fail_cmd
.attr
,
1813 &dev_attr_fail_cmd_code
.attr
,
1814 &dev_attr_handle
.attr
,
1815 &dev_attr_lock_dimm
.attr
,
1819 static struct attribute_group nfit_test_dimm_attribute_group
= {
1820 .attrs
= nfit_test_dimm_attributes
,
1823 static const struct attribute_group
*nfit_test_dimm_attribute_groups
[] = {
1824 &nfit_test_dimm_attribute_group
,
1828 static int nfit_test_dimm_init(struct nfit_test
*t
)
1832 if (devm_add_action_or_reset(&t
->pdev
.dev
, put_dimms
, t
))
1834 for (i
= 0; i
< t
->num_dcr
; i
++) {
1835 t
->dimm_dev
[i
] = device_create_with_groups(nfit_test_dimm
,
1836 &t
->pdev
.dev
, 0, NULL
,
1837 nfit_test_dimm_attribute_groups
,
1838 "test_dimm%d", i
+ t
->dcr_idx
);
1839 if (!t
->dimm_dev
[i
])
1845 static void security_init(struct nfit_test
*t
)
1849 for (i
= 0; i
< t
->num_dcr
; i
++) {
1850 struct nfit_test_sec
*sec
= &dimm_sec_info
[i
];
1852 sec
->ext_state
= ND_INTEL_SEC_ESTATE_ENABLED
;
1856 static void smart_init(struct nfit_test
*t
)
1859 const struct nd_intel_smart_threshold smart_t_data
= {
1860 .alarm_control
= ND_INTEL_SMART_SPARE_TRIP
1861 | ND_INTEL_SMART_TEMP_TRIP
,
1862 .media_temperature
= 40 * 16,
1863 .ctrl_temperature
= 30 * 16,
1867 for (i
= 0; i
< t
->num_dcr
; i
++) {
1868 memcpy(&t
->smart
[i
], &smart_def
, sizeof(smart_def
));
1869 memcpy(&t
->smart_threshold
[i
], &smart_t_data
,
1870 sizeof(smart_t_data
));
1874 static int nfit_test0_alloc(struct nfit_test
*t
)
1876 size_t nfit_size
= sizeof(struct acpi_nfit_system_address
) * NUM_SPA
1877 + sizeof(struct acpi_nfit_memory_map
) * NUM_MEM
1878 + sizeof(struct acpi_nfit_control_region
) * NUM_DCR
1879 + offsetof(struct acpi_nfit_control_region
,
1880 window_size
) * NUM_DCR
1881 + sizeof(struct acpi_nfit_data_region
) * NUM_BDW
1882 + (sizeof(struct acpi_nfit_flush_address
)
1883 + sizeof(u64
) * NUM_HINTS
) * NUM_DCR
1884 + sizeof(struct acpi_nfit_capabilities
);
1887 t
->nfit_buf
= test_alloc(t
, nfit_size
, &t
->nfit_dma
);
1890 t
->nfit_size
= nfit_size
;
1892 t
->spa_set
[0] = test_alloc(t
, SPA0_SIZE
, &t
->spa_set_dma
[0]);
1896 t
->spa_set
[1] = test_alloc(t
, SPA1_SIZE
, &t
->spa_set_dma
[1]);
1900 t
->spa_set
[2] = test_alloc(t
, SPA0_SIZE
, &t
->spa_set_dma
[2]);
1904 for (i
= 0; i
< t
->num_dcr
; i
++) {
1905 t
->dimm
[i
] = test_alloc(t
, DIMM_SIZE
, &t
->dimm_dma
[i
]);
1909 t
->label
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->label_dma
[i
]);
1912 sprintf(t
->label
[i
], "label%d", i
);
1914 t
->flush
[i
] = test_alloc(t
, max(PAGE_SIZE
,
1915 sizeof(u64
) * NUM_HINTS
),
1921 for (i
= 0; i
< t
->num_dcr
; i
++) {
1922 t
->dcr
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->dcr_dma
[i
]);
1927 t
->_fit
= test_alloc(t
, sizeof(union acpi_object
**), &t
->_fit_dma
);
1931 if (nfit_test_dimm_init(t
))
1935 return ars_state_init(&t
->pdev
.dev
, &t
->ars_state
);
1938 static int nfit_test1_alloc(struct nfit_test
*t
)
1940 size_t nfit_size
= sizeof(struct acpi_nfit_system_address
) * 2
1941 + sizeof(struct acpi_nfit_memory_map
) * 2
1942 + offsetof(struct acpi_nfit_control_region
, window_size
) * 2;
1945 t
->nfit_buf
= test_alloc(t
, nfit_size
, &t
->nfit_dma
);
1948 t
->nfit_size
= nfit_size
;
1950 t
->spa_set
[0] = test_alloc(t
, SPA2_SIZE
, &t
->spa_set_dma
[0]);
1954 for (i
= 0; i
< t
->num_dcr
; i
++) {
1955 t
->label
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->label_dma
[i
]);
1958 sprintf(t
->label
[i
], "label%d", i
);
1961 t
->spa_set
[1] = test_alloc(t
, SPA_VCD_SIZE
, &t
->spa_set_dma
[1]);
1965 if (nfit_test_dimm_init(t
))
1968 return ars_state_init(&t
->pdev
.dev
, &t
->ars_state
);
1971 static void dcr_common_init(struct acpi_nfit_control_region
*dcr
)
1973 dcr
->vendor_id
= 0xabcd;
1975 dcr
->revision_id
= 1;
1976 dcr
->valid_fields
= 1;
1977 dcr
->manufacturing_location
= 0xa;
1978 dcr
->manufacturing_date
= cpu_to_be16(2016);
1981 static void nfit_test0_setup(struct nfit_test
*t
)
1983 const int flush_hint_size
= sizeof(struct acpi_nfit_flush_address
)
1984 + (sizeof(u64
) * NUM_HINTS
);
1985 struct acpi_nfit_desc
*acpi_desc
;
1986 struct acpi_nfit_memory_map
*memdev
;
1987 void *nfit_buf
= t
->nfit_buf
;
1988 struct acpi_nfit_system_address
*spa
;
1989 struct acpi_nfit_control_region
*dcr
;
1990 struct acpi_nfit_data_region
*bdw
;
1991 struct acpi_nfit_flush_address
*flush
;
1992 struct acpi_nfit_capabilities
*pcap
;
1993 unsigned int offset
= 0, i
;
1994 unsigned long *acpi_mask
;
1997 * spa0 (interleave first half of dimm0 and dimm1, note storage
1998 * does not actually alias the related block-data-window
2002 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2003 spa
->header
.length
= sizeof(*spa
);
2004 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_PM
), 16);
2005 spa
->range_index
= 0+1;
2006 spa
->address
= t
->spa_set_dma
[0];
2007 spa
->length
= SPA0_SIZE
;
2008 offset
+= spa
->header
.length
;
2011 * spa1 (interleave last half of the 4 DIMMS, note storage
2012 * does not actually alias the related block-data-window
2015 spa
= nfit_buf
+ offset
;
2016 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2017 spa
->header
.length
= sizeof(*spa
);
2018 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_PM
), 16);
2019 spa
->range_index
= 1+1;
2020 spa
->address
= t
->spa_set_dma
[1];
2021 spa
->length
= SPA1_SIZE
;
2022 offset
+= spa
->header
.length
;
2024 /* spa2 (dcr0) dimm0 */
2025 spa
= nfit_buf
+ offset
;
2026 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2027 spa
->header
.length
= sizeof(*spa
);
2028 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
2029 spa
->range_index
= 2+1;
2030 spa
->address
= t
->dcr_dma
[0];
2031 spa
->length
= DCR_SIZE
;
2032 offset
+= spa
->header
.length
;
2034 /* spa3 (dcr1) dimm1 */
2035 spa
= nfit_buf
+ offset
;
2036 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2037 spa
->header
.length
= sizeof(*spa
);
2038 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
2039 spa
->range_index
= 3+1;
2040 spa
->address
= t
->dcr_dma
[1];
2041 spa
->length
= DCR_SIZE
;
2042 offset
+= spa
->header
.length
;
2044 /* spa4 (dcr2) dimm2 */
2045 spa
= nfit_buf
+ offset
;
2046 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2047 spa
->header
.length
= sizeof(*spa
);
2048 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
2049 spa
->range_index
= 4+1;
2050 spa
->address
= t
->dcr_dma
[2];
2051 spa
->length
= DCR_SIZE
;
2052 offset
+= spa
->header
.length
;
2054 /* spa5 (dcr3) dimm3 */
2055 spa
= nfit_buf
+ offset
;
2056 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2057 spa
->header
.length
= sizeof(*spa
);
2058 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
2059 spa
->range_index
= 5+1;
2060 spa
->address
= t
->dcr_dma
[3];
2061 spa
->length
= DCR_SIZE
;
2062 offset
+= spa
->header
.length
;
2064 /* spa6 (bdw for dcr0) dimm0 */
2065 spa
= nfit_buf
+ offset
;
2066 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2067 spa
->header
.length
= sizeof(*spa
);
2068 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
2069 spa
->range_index
= 6+1;
2070 spa
->address
= t
->dimm_dma
[0];
2071 spa
->length
= DIMM_SIZE
;
2072 offset
+= spa
->header
.length
;
2074 /* spa7 (bdw for dcr1) dimm1 */
2075 spa
= nfit_buf
+ offset
;
2076 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2077 spa
->header
.length
= sizeof(*spa
);
2078 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
2079 spa
->range_index
= 7+1;
2080 spa
->address
= t
->dimm_dma
[1];
2081 spa
->length
= DIMM_SIZE
;
2082 offset
+= spa
->header
.length
;
2084 /* spa8 (bdw for dcr2) dimm2 */
2085 spa
= nfit_buf
+ offset
;
2086 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2087 spa
->header
.length
= sizeof(*spa
);
2088 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
2089 spa
->range_index
= 8+1;
2090 spa
->address
= t
->dimm_dma
[2];
2091 spa
->length
= DIMM_SIZE
;
2092 offset
+= spa
->header
.length
;
2094 /* spa9 (bdw for dcr3) dimm3 */
2095 spa
= nfit_buf
+ offset
;
2096 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
2097 spa
->header
.length
= sizeof(*spa
);
2098 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
2099 spa
->range_index
= 9+1;
2100 spa
->address
= t
->dimm_dma
[3];
2101 spa
->length
= DIMM_SIZE
;
2102 offset
+= spa
->header
.length
;
2104 /* mem-region0 (spa0, dimm0) */
2105 memdev
= nfit_buf
+ offset
;
2106 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2107 memdev
->header
.length
= sizeof(*memdev
);
2108 memdev
->device_handle
= handle
[0];
2109 memdev
->physical_id
= 0;
2110 memdev
->region_id
= 0;
2111 memdev
->range_index
= 0+1;
2112 memdev
->region_index
= 4+1;
2113 memdev
->region_size
= SPA0_SIZE
/2;
2114 memdev
->region_offset
= 1;
2115 memdev
->address
= 0;
2116 memdev
->interleave_index
= 0;
2117 memdev
->interleave_ways
= 2;
2118 offset
+= memdev
->header
.length
;
2120 /* mem-region1 (spa0, dimm1) */
2121 memdev
= nfit_buf
+ offset
;
2122 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2123 memdev
->header
.length
= sizeof(*memdev
);
2124 memdev
->device_handle
= handle
[1];
2125 memdev
->physical_id
= 1;
2126 memdev
->region_id
= 0;
2127 memdev
->range_index
= 0+1;
2128 memdev
->region_index
= 5+1;
2129 memdev
->region_size
= SPA0_SIZE
/2;
2130 memdev
->region_offset
= (1 << 8);
2131 memdev
->address
= 0;
2132 memdev
->interleave_index
= 0;
2133 memdev
->interleave_ways
= 2;
2134 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
2135 offset
+= memdev
->header
.length
;
2137 /* mem-region2 (spa1, dimm0) */
2138 memdev
= nfit_buf
+ offset
;
2139 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2140 memdev
->header
.length
= sizeof(*memdev
);
2141 memdev
->device_handle
= handle
[0];
2142 memdev
->physical_id
= 0;
2143 memdev
->region_id
= 1;
2144 memdev
->range_index
= 1+1;
2145 memdev
->region_index
= 4+1;
2146 memdev
->region_size
= SPA1_SIZE
/4;
2147 memdev
->region_offset
= (1 << 16);
2148 memdev
->address
= SPA0_SIZE
/2;
2149 memdev
->interleave_index
= 0;
2150 memdev
->interleave_ways
= 4;
2151 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
2152 offset
+= memdev
->header
.length
;
2154 /* mem-region3 (spa1, dimm1) */
2155 memdev
= nfit_buf
+ offset
;
2156 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2157 memdev
->header
.length
= sizeof(*memdev
);
2158 memdev
->device_handle
= handle
[1];
2159 memdev
->physical_id
= 1;
2160 memdev
->region_id
= 1;
2161 memdev
->range_index
= 1+1;
2162 memdev
->region_index
= 5+1;
2163 memdev
->region_size
= SPA1_SIZE
/4;
2164 memdev
->region_offset
= (1 << 24);
2165 memdev
->address
= SPA0_SIZE
/2;
2166 memdev
->interleave_index
= 0;
2167 memdev
->interleave_ways
= 4;
2168 offset
+= memdev
->header
.length
;
2170 /* mem-region4 (spa1, dimm2) */
2171 memdev
= nfit_buf
+ offset
;
2172 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2173 memdev
->header
.length
= sizeof(*memdev
);
2174 memdev
->device_handle
= handle
[2];
2175 memdev
->physical_id
= 2;
2176 memdev
->region_id
= 0;
2177 memdev
->range_index
= 1+1;
2178 memdev
->region_index
= 6+1;
2179 memdev
->region_size
= SPA1_SIZE
/4;
2180 memdev
->region_offset
= (1ULL << 32);
2181 memdev
->address
= SPA0_SIZE
/2;
2182 memdev
->interleave_index
= 0;
2183 memdev
->interleave_ways
= 4;
2184 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
2185 offset
+= memdev
->header
.length
;
2187 /* mem-region5 (spa1, dimm3) */
2188 memdev
= nfit_buf
+ offset
;
2189 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
2190 memdev
->header
.length
= sizeof(*memdev
);
2191 memdev
->device_handle
= handle
[3];
2192 memdev
->physical_id
= 3;
2193 memdev
->region_id
= 0;
2194 memdev
->range_index
= 1+1;
2195 memdev
->region_index
= 7+1;
2196 memdev
->region_size
= SPA1_SIZE
/4;
2197 memdev
->region_offset
= (1ULL << 40);
2198 memdev
->address
= SPA0_SIZE
/2;
2199 memdev
->interleave_index
= 0;
2200 memdev
->interleave_ways
= 4;
2201 offset
+= memdev
->header
.length
;
	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;
	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	offset += memdev->header.length;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
	offset += memdev->header.length;
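
	/*
	 * mem-region6..13 map each DIMM's control region (dcr0-3) and
	 * block-data-window (bdw0-3) SPA ranges one-to-one; region_size,
	 * region_offset and address stay 0 here because the aperture
	 * geometry is described by the control-region and data-region
	 * structures that follow.
	 */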
	/* dcr-descriptor0: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BLK;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor1: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 1+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BLK;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor2: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 2+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BLK;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;

	/* dcr-descriptor3: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(*dcr);
	dcr->region_index = 3+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BLK;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;
	offset += dcr->header.length;
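
	/*
	 * Each BLK control region advertises a DCR_SIZE aperture with an
	 * 8-byte command register at offset 0 and a 4-byte status register
	 * at offset 8; the serial number is derived from the DIMM handle
	 * so every descriptor stays unique.
	 */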
	/* dcr-descriptor0: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 4+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BYTEN;
	offset += dcr->header.length;

	/* dcr-descriptor1: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 5+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BYTEN;
	offset += dcr->header.length;

	/* dcr-descriptor2: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 6+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BYTEN;
	offset += dcr->header.length;

	/* dcr-descriptor3: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 7+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BYTEN;
	offset += dcr->header.length;
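
	/*
	 * The pmem-only control regions are deliberately truncated: their
	 * header.length ends right before the window_size field because
	 * none of the block-window (command/status) fields apply to a
	 * byte-addressable interface code such as NFIT_FIC_BYTEN.
	 */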
	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 0+1;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 1+1;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 2+1;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(*bdw);
	bdw->region_index = 3+1;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;
	offset += bdw->header.length;
	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[0];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush1 (dimm1) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[1];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush2 (dimm2) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[2];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
	offset += flush->header.length;

	/* flush3 (dimm3) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[3];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
	offset += flush->header.length;
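
	/*
	 * Each flush-hint descriptor publishes NUM_HINTS hint addresses per
	 * DIMM, spaced sizeof(u64) apart inside that DIMM's flush_dma
	 * buffer, so hint writes land in simulated memory.
	 */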
	/* platform capabilities */
	pcap = nfit_buf + offset;
	pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
	pcap->header.length = sizeof(*pcap);
	pcap->highest_capability = 1;
	pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
	offset += pcap->header.length;
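
	/*
	 * When setup_hotplug is set, a fifth DIMM (handle[4]) is appended
	 * with its own control region, block data window, SPA ranges and
	 * flush hints to exercise the hot-add path.
	 */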
	if (t->setup_hotplug) {
		/* dcr-descriptor4: blk */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(*dcr);
		dcr->region_index = 8+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BLK;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;
		offset += dcr->header.length;

		/* dcr-descriptor4: pmem */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = offsetof(struct acpi_nfit_control_region,
				window_size);
		dcr->region_index = 9+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BYTEN;
		offset += dcr->header.length;

		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(*bdw);
		bdw->region_index = 8+1;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;
		offset += bdw->header.length;

		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;
		offset += spa->header.length;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;
		offset += spa->header.length;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;
		offset += spa->header.length;

		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* mem-region15 (spa11, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 9+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = (1ULL << 48);
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
		offset += memdev->header.length;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset += memdev->header.length;

		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = flush_hint_size;
		flush->device_handle = handle[4];
		flush->hint_count = NUM_HINTS;
		for (i = 0; i < NUM_HINTS; i++)
			flush->hint_address[i] = t->flush_dma[4]
				+ i * sizeof(u64);
		offset += flush->header.length;
	}
	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);

	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA0_SIZE);
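
	/*
	 * Force-enable the bus and DIMM commands below so the nfit driver
	 * treats them as supported without consulting real _DSM data; the
	 * test ioctl paths then emulate each command.
	 */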
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
	set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask);
	set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask);
	set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask);
	set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask);
	set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_GET_SECURITY_STATE,
			&acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_SET_PASSPHRASE, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE,
			&acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_UNLOCK_UNIT, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_FREEZE_LOCK, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_SECURE_ERASE, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_SET_MASTER_PASSPHRASE,
			&acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_MASTER_SECURE_ERASE,
			&acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, &acpi_desc->dimm_cmd_force_en);
	set_bit(NVDIMM_INTEL_FW_ACTIVATE_ARM, &acpi_desc->dimm_cmd_force_en);

	acpi_mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, acpi_mask);
	set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE, acpi_mask);
}
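
/*
 * nfit_test1_setup() models the second, legacy bus: a flat PMEM range with
 * no block-data-window aliasing, a virtual-CD region, and a second DIMM
 * whose mapping is flagged as failed to exercise the error paths.
 */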
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset = 0;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;
	offset += memdev->header.length;

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	offset += dcr->header.length;

	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
	offset += memdev->header.length;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	offset += dcr->header.length;
	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);

	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
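
/*
 * Back the emulated BLK apertures with plain memcpy: the "MMIO" base points
 * into the simulated DIMM buffer, and access is serialized with a region
 * lane as a real block-window driver would do.
 */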
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the arch_invalidate_pmem() API */
		arch_invalidate_pmem(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
static unsigned long nfit_ctl_handle;

union acpi_object *result;

static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
{
	if (handle != &nfit_ctl_handle)
		return ERR_PTR(-ENXIO);

	return result;
}

static int setup_result(void *buf, size_t size)
{
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->package.type = ACPI_TYPE_BUFFER;
	result->buffer.pointer = (void *) (result + 1);
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}
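
/*
 * nfit_ctl_test() drives acpi_nfit_ctl() against mocked-up acpi_device,
 * acpi_nfit_desc and nvdimm objects: each sub-test stages the expected
 * firmware reply with setup_result(), which nfit_test_evaluate_dsm() then
 * hands back in place of a real _DSM evaluation.
 */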
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	struct nfit_ctl_test_cmd {
		struct nd_cmd_pkg pkg;
		union {
			struct nd_cmd_get_config_size cfg_size;
			struct nd_cmd_clear_error clear_err;
			struct nd_cmd_ars_status ars_stat;
			struct nd_cmd_ars_cap ars_cap;
			struct nd_intel_bus_fw_activate_businfo fwa_info;
			char buf[sizeof(struct nd_cmd_ars_status)
				+ sizeof(struct nd_ars_record)];
		};
	} cmd;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_family_mask = 1UL << NVDIMM_BUS_FAMILY_NFIT
				| 1UL << NVDIMM_BUS_FAMILY_INTEL,
		},
		.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
			| 1UL << NFIT_CMD_ARS_INJECT_SET
			| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
			| 1UL << NFIT_CMD_ARS_INJECT_GET,
		.family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL] =
			NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};
	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || cmd.cfg_size.status != 0
			|| cmd.cfg_size.config_size != SZ_128K
			|| cmd.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmd.ars_cap);
	cmd.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmd.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}
	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16, /* non-zero extended status */
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmd.clear_err);
	cmd.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test firmware activate bus info */
	cmd_size = sizeof(cmd.fwa_info);
	cmd = (struct nfit_ctl_test_cmd) {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out = cmd_size,
			.nd_fw_size = cmd_size,
		},
		.fwa_info = {
			.state = ND_INTEL_FWA_IDLE,
			.capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
				| ND_INTEL_BUS_FWA_CAP_OSQUIESCE,
			.activate_tmo = 1,
			.cpu_quiesce_tmo = 1,
			.io_quiesce_tmo = 1,
			.max_quiesce_tmo = 1,
		},
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CALL,
			&cmd, sizeof(cmd.pkg) + cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
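
/*
 * Platform-device probe: allocate the per-DIMM and per-region backing
 * buffers, build the NFIT with the instance's setup() routine, register it
 * with acpi_nfit_init(), then replay setup with hotplug enabled to exercise
 * the _FIT notification path.
 */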
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->smart = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart), GFP_KERNEL);
		nfit_test->smart_threshold = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart_threshold),
				GFP_KERNEL);
		nfit_test->fw = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_fw), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma
				&& nfit_test->fw)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_filled);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
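
/*
 * Module init: hook the ACPI/DSM test shims, then create one platform
 * device per emulated bus (NUM_NFITS instances), wiring instance 0 to
 * nfit_test0_alloc/setup and instance 1 to nfit_test1_alloc/setup before
 * registering the driver.
 */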
static __init int nfit_test_init(void)
{
	int rc, i;

	dax_pmem_core_test();
#ifdef CONFIG_DEV_DAX_PMEM_COMPAT
	dax_pmem_compat_test();
#endif

	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(nfit_test_dimm)) {
		rc = PTR_ERR(nfit_test_dimm);
		goto err_register;
	}

	nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
	if (!nfit_pool) {
		rc = -ENOMEM;
		goto err_register;
	}

	if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
		rc = -ENOMEM;
		goto err_register;
	}

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		badrange_init(&nfit_test->badrange);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->dcr_idx = 0;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 2;
			nfit_test->dcr_idx = NUM_DCR;
			nfit_test->num_dcr = 2;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;
		INIT_WORK(&nfit_test->work, uc_error_notify);
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	if (nfit_pool)
		gen_pool_destroy(nfit_pool);

	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			put_device(&instances[i]->pdev.dev);

	return rc;
}
static __exit void nfit_test_exit(void)
{
	int i;

	flush_workqueue(nfit_wq);
	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	gen_pool_destroy(nfit_pool);

	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");