/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "nfit_test.h"
#include "../watermark.h"
/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                              (a)                       (b)            DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 *    |      +-------------------------^----------^         ^
 * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |                 blk5.0             |  pm1.0  |    3      region5
 *           +-------------------------+----------+-+-------+
 *
 * +--+---+                   (Hotplug DIMM)
 *    |      +----------------------------------------------+
 * +--+---+  |                 blk6.0/pm7.0                  |    4      region6/7
 * | imc0 +--+----------------------------------------------+
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated portion of
 *    REGION0 aliases with REGION2 and REGION3.  That unallocated
 *    capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
 *    dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  +---------------------+
 *  |---------------------|
 *  |---------------------|
 *  +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	LABEL_SIZE = SZ_128K,
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
struct nfit_test_dcr {
	__u8 aperature[BDW_SIZE];
};
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	| ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
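
/*
 * For example, NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) encodes imc 1 (bits 8-11)
 * and dimm 1 (bits 0-3) as 0x101, and NFIT_DIMM_HANDLE(1, 0, 0, 0, 0)
 * encodes node 1 (bits 16-27) as 0x10000.
 */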
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};

static unsigned long dimm_fail_cmd_flags[NUM_DCR];
struct nfit_test_fw {
	enum intel_fw_update_state state;
};

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	dma_addr_t *dimm_dma;
	dma_addr_t *flush_dma;
	dma_addr_t *label_dma;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	union acpi_object **_fit;
	struct nd_cmd_ars_status *ars_status;
	unsigned long deadline;
	struct device *dimm_dev[NUM_DCR];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;
	struct work_struct work;
	struct nfit_test_fw *fw;
};

static struct workqueue_struct *nfit_wq;
static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}
static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
	nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
	nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
	nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
	nd_cmd->update_cap = 0;
	nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
	nd_cmd->run_version = 0;
	nd_cmd->updated_version = fw->version;

	return 0;
}
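
/*
 * The firmware-update handlers below emulate a small state machine:
 * FW_STATE_NEW -> (start) -> FW_STATE_IN_PROGRESS -> (finish, flags 0) ->
 * FW_STATE_VERIFY -> (query after ~1s) -> FW_STATE_UPDATED, with an abort
 * (finish, flags 1) returning to FW_STATE_NEW.  This summary is inferred
 * from the handlers themselves.
 */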
static int nd_intel_test_start_update(struct nfit_test *t,
		struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (fw->state != FW_STATE_NEW) {
		/* extended status, FW update in progress */
		nd_cmd->status = 0x10007;
		return 0;
	}

	fw->state = FW_STATE_IN_PROGRESS;
	fw->size_received = 0;
	nd_cmd->context = fw->context;
	dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);

	return 0;
}
252 static int nd_intel_test_send_data(struct nfit_test
*t
,
253 struct nd_intel_fw_send_data
*nd_cmd
, unsigned int buf_len
,
256 struct device
*dev
= &t
->pdev
.dev
;
257 struct nfit_test_fw
*fw
= &t
->fw
[idx
];
258 u32
*status
= (u32
*)&nd_cmd
->data
[nd_cmd
->length
];
260 dev_dbg(dev
, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
261 __func__
, t
, nd_cmd
, buf_len
, idx
);
263 if (buf_len
< sizeof(*nd_cmd
))
267 dev_dbg(dev
, "%s: cmd->status: %#x\n", __func__
, *status
);
268 dev_dbg(dev
, "%s: cmd->data[0]: %#x\n", __func__
, nd_cmd
->data
[0]);
269 dev_dbg(dev
, "%s: cmd->data[%u]: %#x\n", __func__
, nd_cmd
->length
-1,
270 nd_cmd
->data
[nd_cmd
->length
-1]);
272 if (fw
->state
!= FW_STATE_IN_PROGRESS
) {
273 dev_dbg(dev
, "%s: not in IN_PROGRESS state\n", __func__
);
278 if (nd_cmd
->context
!= fw
->context
) {
279 dev_dbg(dev
, "%s: incorrect context: in: %#x correct: %#x\n",
280 __func__
, nd_cmd
->context
, fw
->context
);
286 * check offset + len > size of fw storage
287 * check length is > max send length
289 if (nd_cmd
->offset
+ nd_cmd
->length
> INTEL_FW_STORAGE_SIZE
||
290 nd_cmd
->length
> INTEL_FW_MAX_SEND_LEN
) {
292 dev_dbg(dev
, "%s: buffer boundary violation\n", __func__
);
296 fw
->size_received
+= nd_cmd
->length
;
297 dev_dbg(dev
, "%s: copying %u bytes, %u bytes so far\n",
298 __func__
, nd_cmd
->length
, fw
->size_received
);
303 static int nd_intel_test_finish_fw(struct nfit_test
*t
,
304 struct nd_intel_fw_finish_update
*nd_cmd
,
305 unsigned int buf_len
, int idx
)
307 struct device
*dev
= &t
->pdev
.dev
;
308 struct nfit_test_fw
*fw
= &t
->fw
[idx
];
310 dev_dbg(dev
, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
311 __func__
, t
, nd_cmd
, buf_len
, idx
);
313 if (fw
->state
== FW_STATE_UPDATED
) {
314 /* update already done, need cold boot */
315 nd_cmd
->status
= 0x20007;
319 dev_dbg(dev
, "%s: context: %#x ctrl_flags: %#x\n",
320 __func__
, nd_cmd
->context
, nd_cmd
->ctrl_flags
);
322 switch (nd_cmd
->ctrl_flags
) {
324 if (nd_cmd
->context
!= fw
->context
) {
325 dev_dbg(dev
, "%s: incorrect context: in: %#x correct: %#x\n",
326 __func__
, nd_cmd
->context
,
328 nd_cmd
->status
= 0x10007;
332 fw
->state
= FW_STATE_VERIFY
;
333 /* set 1 second of time for firmware "update" */
334 fw
->end_time
= jiffies
+ HZ
;
338 fw
->size_received
= 0;
339 /* successfully aborted status */
340 nd_cmd
->status
= 0x40007;
341 fw
->state
= FW_STATE_NEW
;
342 dev_dbg(dev
, "%s: abort successful\n", __func__
);
345 default: /* bad control flag */
346 dev_warn(dev
, "%s: unknown control flag: %#x\n",
347 __func__
, nd_cmd
->ctrl_flags
);
354 static int nd_intel_test_finish_query(struct nfit_test
*t
,
355 struct nd_intel_fw_finish_query
*nd_cmd
,
356 unsigned int buf_len
, int idx
)
358 struct device
*dev
= &t
->pdev
.dev
;
359 struct nfit_test_fw
*fw
= &t
->fw
[idx
];
361 dev_dbg(dev
, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
362 __func__
, t
, nd_cmd
, buf_len
, idx
);
364 if (buf_len
< sizeof(*nd_cmd
))
367 if (nd_cmd
->context
!= fw
->context
) {
368 dev_dbg(dev
, "%s: incorrect context: in: %#x correct: %#x\n",
369 __func__
, nd_cmd
->context
, fw
->context
);
370 nd_cmd
->status
= 0x10007;
374 dev_dbg(dev
, "%s context: %#x\n", __func__
, nd_cmd
->context
);
378 nd_cmd
->updated_fw_rev
= 0;
380 dev_dbg(dev
, "%s: new state\n", __func__
);
383 case FW_STATE_IN_PROGRESS
:
384 /* sequencing error */
385 nd_cmd
->status
= 0x40007;
386 nd_cmd
->updated_fw_rev
= 0;
387 dev_dbg(dev
, "%s: sequence error\n", __func__
);
390 case FW_STATE_VERIFY
:
391 if (time_is_after_jiffies64(fw
->end_time
)) {
392 nd_cmd
->updated_fw_rev
= 0;
393 nd_cmd
->status
= 0x20007;
394 dev_dbg(dev
, "%s: still verifying\n", __func__
);
398 dev_dbg(dev
, "%s: transition out verify\n", __func__
);
399 fw
->state
= FW_STATE_UPDATED
;
400 /* we are going to fall through if it's "done" */
401 case FW_STATE_UPDATED
:
403 /* bogus test version */
404 fw
->version
= nd_cmd
->updated_fw_rev
=
405 INTEL_FW_FAKE_VERSION
;
406 dev_dbg(dev
, "%s: updated\n", __func__
);
409 default: /* we should never get here */
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}
429 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
430 *nd_cmd
, unsigned int buf_len
, void *label
)
432 unsigned int len
, offset
= nd_cmd
->in_offset
;
435 if (buf_len
< sizeof(*nd_cmd
))
437 if (offset
>= LABEL_SIZE
)
439 if (nd_cmd
->in_length
+ sizeof(*nd_cmd
) > buf_len
)
443 len
= min(nd_cmd
->in_length
, LABEL_SIZE
- offset
);
444 memcpy(nd_cmd
->out_buf
, label
+ offset
, len
);
445 rc
= buf_len
- sizeof(*nd_cmd
) - len
;
450 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr
*nd_cmd
,
451 unsigned int buf_len
, void *label
)
453 unsigned int len
, offset
= nd_cmd
->in_offset
;
457 if (buf_len
< sizeof(*nd_cmd
))
459 if (offset
>= LABEL_SIZE
)
461 if (nd_cmd
->in_length
+ sizeof(*nd_cmd
) + 4 > buf_len
)
464 status
= (void *)nd_cmd
+ nd_cmd
->in_length
+ sizeof(*nd_cmd
);
466 len
= min(nd_cmd
->in_length
, LABEL_SIZE
- offset
);
467 memcpy(label
+ offset
, nd_cmd
->in_buf
, len
);
468 rc
= buf_len
- sizeof(*nd_cmd
) - (len
+ 4);
#define NFIT_TEST_CLEAR_ERR_UNIT	256

static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	int ars_recs;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* for testing, only store up to n records that fit within 4k */
	ars_recs = SZ_4K / sizeof(struct nd_ars_record);

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ ars_recs * sizeof(struct nd_ars_record);
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}
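
/*
 * Build a canned ARS status payload from the badrange list so that a
 * subsequent ND_CMD_ARS_STATUS reports whichever injected poison ranges
 * overlap [addr, addr + len).
 */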
static void post_ars_status(struct ars_state *ars_state,
		struct badrange *badrange, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;
	struct badrange_entry *be;
	u64 end = addr + len - 1;
	int i = 0;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;

	spin_lock(&badrange->lock);
	list_for_each_entry(be, &badrange->list, list) {
		u64 be_end = be->start + be->length - 1;
		u64 rstart, rend;

		/* skip entries outside the range */
		if (be_end < addr || be->start > end)
			continue;
		rstart = (be->start < addr) ? addr : be->start;
		rend = (be_end < end) ? be_end : end;
		ars_record = &ars_status->records[i];
		ars_record->handle = 0;
		ars_record->err_address = rstart;
		ars_record->length = rend - rstart + 1;
		i++;
	}
	spin_unlock(&badrange->lock);
	ars_status->num_records = i;
	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
		+ i * sizeof(struct nd_ars_record);
}
533 static int nfit_test_cmd_ars_start(struct nfit_test
*t
,
534 struct ars_state
*ars_state
,
535 struct nd_cmd_ars_start
*ars_start
, unsigned int buf_len
,
538 if (buf_len
< sizeof(*ars_start
))
541 spin_lock(&ars_state
->lock
);
542 if (time_before(jiffies
, ars_state
->deadline
)) {
543 ars_start
->status
= NFIT_ARS_START_BUSY
;
546 ars_start
->status
= 0;
547 ars_start
->scrub_time
= 1;
548 post_ars_status(ars_state
, &t
->badrange
, ars_start
->address
,
552 spin_unlock(&ars_state
->lock
);
557 static int nfit_test_cmd_ars_status(struct ars_state
*ars_state
,
558 struct nd_cmd_ars_status
*ars_status
, unsigned int buf_len
,
561 if (buf_len
< ars_state
->ars_status
->out_length
)
564 spin_lock(&ars_state
->lock
);
565 if (time_before(jiffies
, ars_state
->deadline
)) {
566 memset(ars_status
, 0, buf_len
);
567 ars_status
->status
= NFIT_ARS_STATUS_BUSY
;
568 ars_status
->out_length
= sizeof(*ars_status
);
571 memcpy(ars_status
, ars_state
->ars_status
,
572 ars_state
->ars_status
->out_length
);
575 spin_unlock(&ars_state
->lock
);
579 static int nfit_test_cmd_clear_error(struct nfit_test
*t
,
580 struct nd_cmd_clear_error
*clear_err
,
581 unsigned int buf_len
, int *cmd_rc
)
583 const u64 mask
= NFIT_TEST_CLEAR_ERR_UNIT
- 1;
584 if (buf_len
< sizeof(*clear_err
))
587 if ((clear_err
->address
& mask
) || (clear_err
->length
& mask
))
590 badrange_forget(&t
->badrange
, clear_err
->address
, clear_err
->length
);
591 clear_err
->status
= 0;
592 clear_err
->cleared
= clear_err
->length
;
597 struct region_search_spa
{
599 struct nd_region
*region
;
602 static int is_region_device(struct device
*dev
)
604 return !strncmp(dev
->kobj
.name
, "region", 6);
607 static int nfit_test_search_region_spa(struct device
*dev
, void *data
)
609 struct region_search_spa
*ctx
= data
;
610 struct nd_region
*nd_region
;
611 resource_size_t ndr_end
;
613 if (!is_region_device(dev
))
616 nd_region
= to_nd_region(dev
);
617 ndr_end
= nd_region
->ndr_start
+ nd_region
->ndr_size
;
619 if (ctx
->addr
>= nd_region
->ndr_start
&& ctx
->addr
< ndr_end
) {
620 ctx
->region
= nd_region
;
627 static int nfit_test_search_spa(struct nvdimm_bus
*bus
,
628 struct nd_cmd_translate_spa
*spa
)
631 struct nd_region
*nd_region
= NULL
;
632 struct nvdimm
*nvdimm
= NULL
;
633 struct nd_mapping
*nd_mapping
= NULL
;
634 struct region_search_spa ctx
= {
640 ret
= device_for_each_child(&bus
->dev
, &ctx
,
641 nfit_test_search_region_spa
);
646 nd_region
= ctx
.region
;
648 dpa
= ctx
.addr
- nd_region
->ndr_start
;
651 * last dimm is selected for test
653 nd_mapping
= &nd_region
->mapping
[nd_region
->ndr_mappings
- 1];
654 nvdimm
= nd_mapping
->nvdimm
;
656 spa
->devices
[0].nfit_device_handle
= handle
[nvdimm
->id
];
657 spa
->num_nvdimms
= 1;
658 spa
->devices
[0].dpa
= dpa
;
663 static int nfit_test_cmd_translate_spa(struct nvdimm_bus
*bus
,
664 struct nd_cmd_translate_spa
*spa
, unsigned int buf_len
)
666 if (buf_len
< spa
->translate_length
)
669 if (nfit_test_search_spa(bus
, spa
) < 0 || !spa
->num_nvdimms
)
static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
		struct nd_intel_smart *smart_data)
{
	if (buf_len < sizeof(*smart))
		return -EINVAL;
	memcpy(smart, smart_data, sizeof(*smart));
	return 0;
}

static int nfit_test_cmd_smart_threshold(
		struct nd_intel_smart_threshold *out,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *smart_t)
{
	if (buf_len < sizeof(*smart_t))
		return -EINVAL;
	memcpy(out, smart_t, sizeof(*smart_t));
	return 0;
}
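
/*
 * Compare the emulated SMART data against the current thresholds and,
 * when a spare-capacity or temperature trip fires, deliver a 0x81
 * notification to the DIMM device via __acpi_nvdimm_notify().
 */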
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares <= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
					>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
					>= thresh->ctrl_temperature)) {
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}
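
/*
 * Setting new thresholds immediately re-evaluates them against the current
 * SMART payload via smart_notify(), so a test can provoke a health event
 * on demand.
 */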
719 static int nfit_test_cmd_smart_set_threshold(
720 struct nd_intel_smart_set_threshold
*in
,
721 unsigned int buf_len
,
722 struct nd_intel_smart_threshold
*thresh
,
723 struct nd_intel_smart
*smart
,
724 struct device
*bus_dev
, struct device
*dimm_dev
)
728 size
= sizeof(*in
) - 4;
731 memcpy(thresh
->data
, in
, size
);
733 smart_notify(bus_dev
, dimm_dev
, smart
, thresh
);
static void uc_error_notify(struct work_struct *work)
{
	struct nfit_test *t = container_of(work, typeof(*t), work);

	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
}
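
/*
 * The badrange list doubles as the error-injection store: the inject/clear
 * handlers below add or remove poison ranges, and the optional notify flag
 * queues uc_error_notify() to raise an uncorrectable-memory-error event on
 * the bus.
 */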
static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_inj)) {
		rc = -EINVAL;
		goto err;
	}

	if (err_inj->err_inj_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
			err_inj->err_inj_spa_range_length);
	if (rc)
		goto err;

	if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
		queue_work(nfit_wq, &t->work);

	err_inj->status = 0;
	return 0;

err:
	err_inj->status = NFIT_ARS_INJECT_INVALID;
	return rc;
}
776 static int nfit_test_cmd_ars_inject_clear(struct nfit_test
*t
,
777 struct nd_cmd_ars_err_inj_clr
*err_clr
, unsigned int buf_len
)
781 if (buf_len
!= sizeof(*err_clr
)) {
786 if (err_clr
->err_inj_clr_spa_range_length
<= 0) {
791 badrange_forget(&t
->badrange
, err_clr
->err_inj_clr_spa_range_base
,
792 err_clr
->err_inj_clr_spa_range_length
);
798 err_clr
->status
= NFIT_ARS_INJECT_INVALID
;
802 static int nfit_test_cmd_ars_inject_status(struct nfit_test
*t
,
803 struct nd_cmd_ars_err_inj_stat
*err_stat
,
804 unsigned int buf_len
)
806 struct badrange_entry
*be
;
807 int max
= SZ_4K
/ sizeof(struct nd_error_stat_query_record
);
810 err_stat
->status
= 0;
811 spin_lock(&t
->badrange
.lock
);
812 list_for_each_entry(be
, &t
->badrange
.list
, list
) {
813 err_stat
->record
[i
].err_inj_stat_spa_range_base
= be
->start
;
814 err_stat
->record
[i
].err_inj_stat_spa_range_length
= be
->length
;
819 spin_unlock(&t
->badrange
.lock
);
820 err_stat
->inj_err_rec_count
= i
;
825 static int nd_intel_test_cmd_set_lss_status(struct nfit_test
*t
,
826 struct nd_intel_lss
*nd_cmd
, unsigned int buf_len
)
828 struct device
*dev
= &t
->pdev
.dev
;
830 if (buf_len
< sizeof(*nd_cmd
))
833 switch (nd_cmd
->enable
) {
836 dev_dbg(dev
, "%s: Latch System Shutdown Status disabled\n",
841 dev_dbg(dev
, "%s: Latch System Shutdown Status enabled\n",
845 dev_warn(dev
, "Unknown enable value: %#x\n", nd_cmd
->enable
);
846 nd_cmd
->status
= 0x3;
static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
{
	int i;

	/* lookup per-dimm data */
	for (i = 0; i < ARRAY_SIZE(handle); i++)
		if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
			break;
	if (i >= ARRAY_SIZE(handle))
		return -ENXIO;

	if ((1 << func) & dimm_fail_cmd_flags[i])
		return -EIO;

	return i;
}
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;
			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);

			switch (func) {
			case ND_INTEL_ENABLE_LSS_STATUS:
				return nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
			case ND_INTEL_FW_GET_INFO:
				return nd_intel_test_get_fw_info(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_START_UPDATE:
				return nd_intel_test_start_update(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_SEND_DATA:
				return nd_intel_test_send_data(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_FINISH_UPDATE:
				return nd_intel_test_finish_fw(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_FW_FINISH_QUERY:
				return nd_intel_test_finish_query(t, buf,
						buf_len, i - t->dcr_idx);
			case ND_INTEL_SMART:
				return nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i - t->dcr_idx]);
			case ND_INTEL_SMART_THRESHOLD:
				return nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx]);
			case ND_INTEL_SMART_SET_THRESHOLD:
				return nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
			}
		}

		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);

		switch (cmd) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		}
	} else {
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (cmd == ND_CMD_CALL) {
			func = call_pkg->nd_command;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			}
		}

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (cmd) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		}
	}

	return rc;
}

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	vfree(nfit_res->buf);
}
1042 static void *__test_alloc(struct nfit_test
*t
, size_t size
, dma_addr_t
*dma
,
1045 struct device
*dev
= &t
->pdev
.dev
;
1046 struct nfit_test_resource
*nfit_res
= kzalloc(sizeof(*nfit_res
),
1050 if (!buf
|| !nfit_res
)
1052 rc
= devm_add_action(dev
, release_nfit_res
, nfit_res
);
1055 INIT_LIST_HEAD(&nfit_res
->list
);
1056 memset(buf
, 0, size
);
1057 nfit_res
->dev
= dev
;
1058 nfit_res
->buf
= buf
;
1059 nfit_res
->res
.start
= *dma
;
1060 nfit_res
->res
.end
= *dma
+ size
- 1;
1061 nfit_res
->res
.name
= "NFIT";
1062 spin_lock_init(&nfit_res
->lock
);
1063 INIT_LIST_HEAD(&nfit_res
->requests
);
1064 spin_lock(&nfit_test_lock
);
1065 list_add(&nfit_res
->list
, &t
->resources
);
1066 spin_unlock(&nfit_test_lock
);
1068 return nfit_res
->buf
;
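
/*
 * test_alloc() backs each simulated resource with vmalloc() memory and
 * reuses the buffer's kernel virtual address as its fake "physical"
 * address; nfit_test_lookup() later resolves such addresses back to the
 * test buffers.
 */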
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}
1084 static struct nfit_test_resource
*nfit_test_lookup(resource_size_t addr
)
1088 for (i
= 0; i
< ARRAY_SIZE(instances
); i
++) {
1089 struct nfit_test_resource
*n
, *nfit_res
= NULL
;
1090 struct nfit_test
*t
= instances
[i
];
1094 spin_lock(&nfit_test_lock
);
1095 list_for_each_entry(n
, &t
->resources
, list
) {
1096 if (addr
>= n
->res
.start
&& (addr
< n
->res
.start
1097 + resource_size(&n
->res
))) {
1100 } else if (addr
>= (unsigned long) n
->buf
1101 && (addr
< (unsigned long) n
->buf
1102 + resource_size(&n
->res
))) {
1107 spin_unlock(&nfit_test_lock
);
1115 static int ars_state_init(struct device
*dev
, struct ars_state
*ars_state
)
1117 /* for testing, only store up to n records that fit within 4k */
1118 ars_state
->ars_status
= devm_kzalloc(dev
,
1119 sizeof(struct nd_cmd_ars_status
) + SZ_4K
, GFP_KERNEL
);
1120 if (!ars_state
->ars_status
)
1122 spin_lock_init(&ars_state
->lock
);
1126 static void put_dimms(void *data
)
1128 struct device
**dimm_dev
= data
;
1131 for (i
= 0; i
< NUM_DCR
; i
++)
1133 device_unregister(dimm_dev
[i
]);
1136 static struct class *nfit_test_dimm
;
1138 static int dimm_name_to_id(struct device
*dev
)
1142 if (sscanf(dev_name(dev
), "test_dimm%d", &dimm
) != 1
1143 || dimm
>= NUM_DCR
|| dimm
< 0)
1149 static ssize_t
handle_show(struct device
*dev
, struct device_attribute
*attr
,
1152 int dimm
= dimm_name_to_id(dev
);
1157 return sprintf(buf
, "%#x", handle
[dimm
]);
1159 DEVICE_ATTR_RO(handle
);
1161 static ssize_t
fail_cmd_show(struct device
*dev
, struct device_attribute
*attr
,
1164 int dimm
= dimm_name_to_id(dev
);
1169 return sprintf(buf
, "%#lx\n", dimm_fail_cmd_flags
[dimm
]);
1172 static ssize_t
fail_cmd_store(struct device
*dev
, struct device_attribute
*attr
,
1173 const char *buf
, size_t size
)
1175 int dimm
= dimm_name_to_id(dev
);
1182 rc
= kstrtol(buf
, 0, &val
);
1186 dimm_fail_cmd_flags
[dimm
] = val
;
1189 static DEVICE_ATTR_RW(fail_cmd
);
1191 static struct attribute
*nfit_test_dimm_attributes
[] = {
1192 &dev_attr_fail_cmd
.attr
,
1193 &dev_attr_handle
.attr
,
1197 static struct attribute_group nfit_test_dimm_attribute_group
= {
1198 .attrs
= nfit_test_dimm_attributes
,
1201 static const struct attribute_group
*nfit_test_dimm_attribute_groups
[] = {
1202 &nfit_test_dimm_attribute_group
,
1206 static void smart_init(struct nfit_test
*t
)
1209 const struct nd_intel_smart_threshold smart_t_data
= {
1210 .alarm_control
= ND_INTEL_SMART_SPARE_TRIP
1211 | ND_INTEL_SMART_TEMP_TRIP
,
1212 .media_temperature
= 40 * 16,
1213 .ctrl_temperature
= 30 * 16,
1216 const struct nd_intel_smart smart_data
= {
1217 .flags
= ND_INTEL_SMART_HEALTH_VALID
1218 | ND_INTEL_SMART_SPARES_VALID
1219 | ND_INTEL_SMART_ALARM_VALID
1220 | ND_INTEL_SMART_USED_VALID
1221 | ND_INTEL_SMART_SHUTDOWN_VALID
1222 | ND_INTEL_SMART_MTEMP_VALID
,
1223 .health
= ND_INTEL_SMART_NON_CRITICAL_HEALTH
,
1224 .media_temperature
= 23 * 16,
1225 .ctrl_temperature
= 30 * 16,
1226 .pmic_temperature
= 40 * 16,
1228 .alarm_flags
= ND_INTEL_SMART_SPARE_TRIP
1229 | ND_INTEL_SMART_TEMP_TRIP
,
1232 .shutdown_state
= 0,
1234 .shutdown_count
= 100,
1237 for (i
= 0; i
< t
->num_dcr
; i
++) {
1238 memcpy(&t
->smart
[i
], &smart_data
, sizeof(smart_data
));
1239 memcpy(&t
->smart_threshold
[i
], &smart_t_data
,
1240 sizeof(smart_t_data
));
1244 static int nfit_test0_alloc(struct nfit_test
*t
)
1246 size_t nfit_size
= sizeof(struct acpi_nfit_system_address
) * NUM_SPA
1247 + sizeof(struct acpi_nfit_memory_map
) * NUM_MEM
1248 + sizeof(struct acpi_nfit_control_region
) * NUM_DCR
1249 + offsetof(struct acpi_nfit_control_region
,
1250 window_size
) * NUM_DCR
1251 + sizeof(struct acpi_nfit_data_region
) * NUM_BDW
1252 + (sizeof(struct acpi_nfit_flush_address
)
1253 + sizeof(u64
) * NUM_HINTS
) * NUM_DCR
1254 + sizeof(struct acpi_nfit_capabilities
);
1257 t
->nfit_buf
= test_alloc(t
, nfit_size
, &t
->nfit_dma
);
1260 t
->nfit_size
= nfit_size
;
1262 t
->spa_set
[0] = test_alloc(t
, SPA0_SIZE
, &t
->spa_set_dma
[0]);
1266 t
->spa_set
[1] = test_alloc(t
, SPA1_SIZE
, &t
->spa_set_dma
[1]);
1270 t
->spa_set
[2] = test_alloc(t
, SPA0_SIZE
, &t
->spa_set_dma
[2]);
1274 for (i
= 0; i
< t
->num_dcr
; i
++) {
1275 t
->dimm
[i
] = test_alloc(t
, DIMM_SIZE
, &t
->dimm_dma
[i
]);
1279 t
->label
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->label_dma
[i
]);
1282 sprintf(t
->label
[i
], "label%d", i
);
1284 t
->flush
[i
] = test_alloc(t
, max(PAGE_SIZE
,
1285 sizeof(u64
) * NUM_HINTS
),
1291 for (i
= 0; i
< t
->num_dcr
; i
++) {
1292 t
->dcr
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->dcr_dma
[i
]);
1297 t
->_fit
= test_alloc(t
, sizeof(union acpi_object
**), &t
->_fit_dma
);
1301 if (devm_add_action_or_reset(&t
->pdev
.dev
, put_dimms
, t
->dimm_dev
))
1303 for (i
= 0; i
< NUM_DCR
; i
++) {
1304 t
->dimm_dev
[i
] = device_create_with_groups(nfit_test_dimm
,
1305 &t
->pdev
.dev
, 0, NULL
,
1306 nfit_test_dimm_attribute_groups
,
1308 if (!t
->dimm_dev
[i
])
1313 return ars_state_init(&t
->pdev
.dev
, &t
->ars_state
);
1316 static int nfit_test1_alloc(struct nfit_test
*t
)
1318 size_t nfit_size
= sizeof(struct acpi_nfit_system_address
) * 2
1319 + sizeof(struct acpi_nfit_memory_map
) * 2
1320 + offsetof(struct acpi_nfit_control_region
, window_size
) * 2;
1323 t
->nfit_buf
= test_alloc(t
, nfit_size
, &t
->nfit_dma
);
1326 t
->nfit_size
= nfit_size
;
1328 t
->spa_set
[0] = test_alloc(t
, SPA2_SIZE
, &t
->spa_set_dma
[0]);
1332 for (i
= 0; i
< t
->num_dcr
; i
++) {
1333 t
->label
[i
] = test_alloc(t
, LABEL_SIZE
, &t
->label_dma
[i
]);
1336 sprintf(t
->label
[i
], "label%d", i
);
1339 t
->spa_set
[1] = test_alloc(t
, SPA_VCD_SIZE
, &t
->spa_set_dma
[1]);
1344 return ars_state_init(&t
->pdev
.dev
, &t
->ars_state
);
1347 static void dcr_common_init(struct acpi_nfit_control_region
*dcr
)
1349 dcr
->vendor_id
= 0xabcd;
1351 dcr
->revision_id
= 1;
1352 dcr
->valid_fields
= 1;
1353 dcr
->manufacturing_location
= 0xa;
1354 dcr
->manufacturing_date
= cpu_to_be16(2016);
1357 static void nfit_test0_setup(struct nfit_test
*t
)
1359 const int flush_hint_size
= sizeof(struct acpi_nfit_flush_address
)
1360 + (sizeof(u64
) * NUM_HINTS
);
1361 struct acpi_nfit_desc
*acpi_desc
;
1362 struct acpi_nfit_memory_map
*memdev
;
1363 void *nfit_buf
= t
->nfit_buf
;
1364 struct acpi_nfit_system_address
*spa
;
1365 struct acpi_nfit_control_region
*dcr
;
1366 struct acpi_nfit_data_region
*bdw
;
1367 struct acpi_nfit_flush_address
*flush
;
1368 struct acpi_nfit_capabilities
*pcap
;
1369 unsigned int offset
, i
;
1372 * spa0 (interleave first half of dimm0 and dimm1, note storage
1373 * does not actually alias the related block-data-window
1377 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1378 spa
->header
.length
= sizeof(*spa
);
1379 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_PM
), 16);
1380 spa
->range_index
= 0+1;
1381 spa
->address
= t
->spa_set_dma
[0];
1382 spa
->length
= SPA0_SIZE
;
1385 * spa1 (interleave last half of the 4 DIMMS, note storage
1386 * does not actually alias the related block-data-window
1389 spa
= nfit_buf
+ sizeof(*spa
);
1390 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1391 spa
->header
.length
= sizeof(*spa
);
1392 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_PM
), 16);
1393 spa
->range_index
= 1+1;
1394 spa
->address
= t
->spa_set_dma
[1];
1395 spa
->length
= SPA1_SIZE
;
1397 /* spa2 (dcr0) dimm0 */
1398 spa
= nfit_buf
+ sizeof(*spa
) * 2;
1399 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1400 spa
->header
.length
= sizeof(*spa
);
1401 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
1402 spa
->range_index
= 2+1;
1403 spa
->address
= t
->dcr_dma
[0];
1404 spa
->length
= DCR_SIZE
;
1406 /* spa3 (dcr1) dimm1 */
1407 spa
= nfit_buf
+ sizeof(*spa
) * 3;
1408 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1409 spa
->header
.length
= sizeof(*spa
);
1410 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
1411 spa
->range_index
= 3+1;
1412 spa
->address
= t
->dcr_dma
[1];
1413 spa
->length
= DCR_SIZE
;
1415 /* spa4 (dcr2) dimm2 */
1416 spa
= nfit_buf
+ sizeof(*spa
) * 4;
1417 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1418 spa
->header
.length
= sizeof(*spa
);
1419 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
1420 spa
->range_index
= 4+1;
1421 spa
->address
= t
->dcr_dma
[2];
1422 spa
->length
= DCR_SIZE
;
1424 /* spa5 (dcr3) dimm3 */
1425 spa
= nfit_buf
+ sizeof(*spa
) * 5;
1426 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1427 spa
->header
.length
= sizeof(*spa
);
1428 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
1429 spa
->range_index
= 5+1;
1430 spa
->address
= t
->dcr_dma
[3];
1431 spa
->length
= DCR_SIZE
;
1433 /* spa6 (bdw for dcr0) dimm0 */
1434 spa
= nfit_buf
+ sizeof(*spa
) * 6;
1435 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1436 spa
->header
.length
= sizeof(*spa
);
1437 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
1438 spa
->range_index
= 6+1;
1439 spa
->address
= t
->dimm_dma
[0];
1440 spa
->length
= DIMM_SIZE
;
1442 /* spa7 (bdw for dcr1) dimm1 */
1443 spa
= nfit_buf
+ sizeof(*spa
) * 7;
1444 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1445 spa
->header
.length
= sizeof(*spa
);
1446 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
1447 spa
->range_index
= 7+1;
1448 spa
->address
= t
->dimm_dma
[1];
1449 spa
->length
= DIMM_SIZE
;
1451 /* spa8 (bdw for dcr2) dimm2 */
1452 spa
= nfit_buf
+ sizeof(*spa
) * 8;
1453 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1454 spa
->header
.length
= sizeof(*spa
);
1455 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
1456 spa
->range_index
= 8+1;
1457 spa
->address
= t
->dimm_dma
[2];
1458 spa
->length
= DIMM_SIZE
;
1460 /* spa9 (bdw for dcr3) dimm3 */
1461 spa
= nfit_buf
+ sizeof(*spa
) * 9;
1462 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1463 spa
->header
.length
= sizeof(*spa
);
1464 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
1465 spa
->range_index
= 9+1;
1466 spa
->address
= t
->dimm_dma
[3];
1467 spa
->length
= DIMM_SIZE
;
1469 offset
= sizeof(*spa
) * 10;
1470 /* mem-region0 (spa0, dimm0) */
1471 memdev
= nfit_buf
+ offset
;
1472 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1473 memdev
->header
.length
= sizeof(*memdev
);
1474 memdev
->device_handle
= handle
[0];
1475 memdev
->physical_id
= 0;
1476 memdev
->region_id
= 0;
1477 memdev
->range_index
= 0+1;
1478 memdev
->region_index
= 4+1;
1479 memdev
->region_size
= SPA0_SIZE
/2;
1480 memdev
->region_offset
= 1;
1481 memdev
->address
= 0;
1482 memdev
->interleave_index
= 0;
1483 memdev
->interleave_ways
= 2;
1485 /* mem-region1 (spa0, dimm1) */
1486 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
);
1487 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1488 memdev
->header
.length
= sizeof(*memdev
);
1489 memdev
->device_handle
= handle
[1];
1490 memdev
->physical_id
= 1;
1491 memdev
->region_id
= 0;
1492 memdev
->range_index
= 0+1;
1493 memdev
->region_index
= 5+1;
1494 memdev
->region_size
= SPA0_SIZE
/2;
1495 memdev
->region_offset
= (1 << 8);
1496 memdev
->address
= 0;
1497 memdev
->interleave_index
= 0;
1498 memdev
->interleave_ways
= 2;
1499 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
1501 /* mem-region2 (spa1, dimm0) */
1502 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 2;
1503 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1504 memdev
->header
.length
= sizeof(*memdev
);
1505 memdev
->device_handle
= handle
[0];
1506 memdev
->physical_id
= 0;
1507 memdev
->region_id
= 1;
1508 memdev
->range_index
= 1+1;
1509 memdev
->region_index
= 4+1;
1510 memdev
->region_size
= SPA1_SIZE
/4;
1511 memdev
->region_offset
= (1 << 16);
1512 memdev
->address
= SPA0_SIZE
/2;
1513 memdev
->interleave_index
= 0;
1514 memdev
->interleave_ways
= 4;
1515 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
1517 /* mem-region3 (spa1, dimm1) */
1518 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 3;
1519 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1520 memdev
->header
.length
= sizeof(*memdev
);
1521 memdev
->device_handle
= handle
[1];
1522 memdev
->physical_id
= 1;
1523 memdev
->region_id
= 1;
1524 memdev
->range_index
= 1+1;
1525 memdev
->region_index
= 5+1;
1526 memdev
->region_size
= SPA1_SIZE
/4;
1527 memdev
->region_offset
= (1 << 24);
1528 memdev
->address
= SPA0_SIZE
/2;
1529 memdev
->interleave_index
= 0;
1530 memdev
->interleave_ways
= 4;
1532 /* mem-region4 (spa1, dimm2) */
1533 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 4;
1534 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1535 memdev
->header
.length
= sizeof(*memdev
);
1536 memdev
->device_handle
= handle
[2];
1537 memdev
->physical_id
= 2;
1538 memdev
->region_id
= 0;
1539 memdev
->range_index
= 1+1;
1540 memdev
->region_index
= 6+1;
1541 memdev
->region_size
= SPA1_SIZE
/4;
1542 memdev
->region_offset
= (1ULL << 32);
1543 memdev
->address
= SPA0_SIZE
/2;
1544 memdev
->interleave_index
= 0;
1545 memdev
->interleave_ways
= 4;
1546 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
1548 /* mem-region5 (spa1, dimm3) */
1549 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 5;
1550 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1551 memdev
->header
.length
= sizeof(*memdev
);
1552 memdev
->device_handle
= handle
[3];
1553 memdev
->physical_id
= 3;
1554 memdev
->region_id
= 0;
1555 memdev
->range_index
= 1+1;
1556 memdev
->region_index
= 7+1;
1557 memdev
->region_size
= SPA1_SIZE
/4;
1558 memdev
->region_offset
= (1ULL << 40);
1559 memdev
->address
= SPA0_SIZE
/2;
1560 memdev
->interleave_index
= 0;
1561 memdev
->interleave_ways
= 4;
1563 /* mem-region6 (spa/dcr0, dimm0) */
1564 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 6;
1565 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1566 memdev
->header
.length
= sizeof(*memdev
);
1567 memdev
->device_handle
= handle
[0];
1568 memdev
->physical_id
= 0;
1569 memdev
->region_id
= 0;
1570 memdev
->range_index
= 2+1;
1571 memdev
->region_index
= 0+1;
1572 memdev
->region_size
= 0;
1573 memdev
->region_offset
= 0;
1574 memdev
->address
= 0;
1575 memdev
->interleave_index
= 0;
1576 memdev
->interleave_ways
= 1;
1578 /* mem-region7 (spa/dcr1, dimm1) */
1579 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 7;
1580 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1581 memdev
->header
.length
= sizeof(*memdev
);
1582 memdev
->device_handle
= handle
[1];
1583 memdev
->physical_id
= 1;
1584 memdev
->region_id
= 0;
1585 memdev
->range_index
= 3+1;
1586 memdev
->region_index
= 1+1;
1587 memdev
->region_size
= 0;
1588 memdev
->region_offset
= 0;
1589 memdev
->address
= 0;
1590 memdev
->interleave_index
= 0;
1591 memdev
->interleave_ways
= 1;
1593 /* mem-region8 (spa/dcr2, dimm2) */
1594 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 8;
1595 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1596 memdev
->header
.length
= sizeof(*memdev
);
1597 memdev
->device_handle
= handle
[2];
1598 memdev
->physical_id
= 2;
1599 memdev
->region_id
= 0;
1600 memdev
->range_index
= 4+1;
1601 memdev
->region_index
= 2+1;
1602 memdev
->region_size
= 0;
1603 memdev
->region_offset
= 0;
1604 memdev
->address
= 0;
1605 memdev
->interleave_index
= 0;
1606 memdev
->interleave_ways
= 1;
1608 /* mem-region9 (spa/dcr3, dimm3) */
1609 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 9;
1610 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1611 memdev
->header
.length
= sizeof(*memdev
);
1612 memdev
->device_handle
= handle
[3];
1613 memdev
->physical_id
= 3;
1614 memdev
->region_id
= 0;
1615 memdev
->range_index
= 5+1;
1616 memdev
->region_index
= 3+1;
1617 memdev
->region_size
= 0;
1618 memdev
->region_offset
= 0;
1619 memdev
->address
= 0;
1620 memdev
->interleave_index
= 0;
1621 memdev
->interleave_ways
= 1;
1623 /* mem-region10 (spa/bdw0, dimm0) */
1624 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 10;
1625 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1626 memdev
->header
.length
= sizeof(*memdev
);
1627 memdev
->device_handle
= handle
[0];
1628 memdev
->physical_id
= 0;
1629 memdev
->region_id
= 0;
1630 memdev
->range_index
= 6+1;
1631 memdev
->region_index
= 0+1;
1632 memdev
->region_size
= 0;
1633 memdev
->region_offset
= 0;
1634 memdev
->address
= 0;
1635 memdev
->interleave_index
= 0;
1636 memdev
->interleave_ways
= 1;
1638 /* mem-region11 (spa/bdw1, dimm1) */
1639 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 11;
1640 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1641 memdev
->header
.length
= sizeof(*memdev
);
1642 memdev
->device_handle
= handle
[1];
1643 memdev
->physical_id
= 1;
1644 memdev
->region_id
= 0;
1645 memdev
->range_index
= 7+1;
1646 memdev
->region_index
= 1+1;
1647 memdev
->region_size
= 0;
1648 memdev
->region_offset
= 0;
1649 memdev
->address
= 0;
1650 memdev
->interleave_index
= 0;
1651 memdev
->interleave_ways
= 1;
1653 /* mem-region12 (spa/bdw2, dimm2) */
1654 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 12;
1655 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1656 memdev
->header
.length
= sizeof(*memdev
);
1657 memdev
->device_handle
= handle
[2];
1658 memdev
->physical_id
= 2;
1659 memdev
->region_id
= 0;
1660 memdev
->range_index
= 8+1;
1661 memdev
->region_index
= 2+1;
1662 memdev
->region_size
= 0;
1663 memdev
->region_offset
= 0;
1664 memdev
->address
= 0;
1665 memdev
->interleave_index
= 0;
1666 memdev
->interleave_ways
= 1;
1668 /* mem-region13 (spa/dcr3, dimm3) */
1669 memdev
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_memory_map
) * 13;
1670 memdev
->header
.type
= ACPI_NFIT_TYPE_MEMORY_MAP
;
1671 memdev
->header
.length
= sizeof(*memdev
);
1672 memdev
->device_handle
= handle
[3];
1673 memdev
->physical_id
= 3;
1674 memdev
->region_id
= 0;
1675 memdev
->range_index
= 9+1;
1676 memdev
->region_index
= 3+1;
1677 memdev
->region_size
= 0;
1678 memdev
->region_offset
= 0;
1679 memdev
->address
= 0;
1680 memdev
->interleave_index
= 0;
1681 memdev
->interleave_ways
= 1;
1682 memdev
->flags
= ACPI_NFIT_MEM_HEALTH_ENABLED
;
1684 offset
= offset
+ sizeof(struct acpi_nfit_memory_map
) * 14;
1685 /* dcr-descriptor0: blk */
1686 dcr
= nfit_buf
+ offset
;
1687 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1688 dcr
->header
.length
= sizeof(struct acpi_nfit_control_region
);
1689 dcr
->region_index
= 0+1;
1690 dcr_common_init(dcr
);
1691 dcr
->serial_number
= ~handle
[0];
1692 dcr
->code
= NFIT_FIC_BLK
;
1694 dcr
->window_size
= DCR_SIZE
;
1695 dcr
->command_offset
= 0;
1696 dcr
->command_size
= 8;
1697 dcr
->status_offset
= 8;
1698 dcr
->status_size
= 4;
1700 /* dcr-descriptor1: blk */
1701 dcr
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_control_region
);
1702 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1703 dcr
->header
.length
= sizeof(struct acpi_nfit_control_region
);
1704 dcr
->region_index
= 1+1;
1705 dcr_common_init(dcr
);
1706 dcr
->serial_number
= ~handle
[1];
1707 dcr
->code
= NFIT_FIC_BLK
;
1709 dcr
->window_size
= DCR_SIZE
;
1710 dcr
->command_offset
= 0;
1711 dcr
->command_size
= 8;
1712 dcr
->status_offset
= 8;
1713 dcr
->status_size
= 4;
1715 /* dcr-descriptor2: blk */
1716 dcr
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_control_region
) * 2;
1717 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1718 dcr
->header
.length
= sizeof(struct acpi_nfit_control_region
);
1719 dcr
->region_index
= 2+1;
1720 dcr_common_init(dcr
);
1721 dcr
->serial_number
= ~handle
[2];
1722 dcr
->code
= NFIT_FIC_BLK
;
1724 dcr
->window_size
= DCR_SIZE
;
1725 dcr
->command_offset
= 0;
1726 dcr
->command_size
= 8;
1727 dcr
->status_offset
= 8;
1728 dcr
->status_size
= 4;
1730 /* dcr-descriptor3: blk */
1731 dcr
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_control_region
) * 3;
1732 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1733 dcr
->header
.length
= sizeof(struct acpi_nfit_control_region
);
1734 dcr
->region_index
= 3+1;
1735 dcr_common_init(dcr
);
1736 dcr
->serial_number
= ~handle
[3];
1737 dcr
->code
= NFIT_FIC_BLK
;
1739 dcr
->window_size
= DCR_SIZE
;
1740 dcr
->command_offset
= 0;
1741 dcr
->command_size
= 8;
1742 dcr
->status_offset
= 8;
1743 dcr
->status_size
= 4;
1745 offset
= offset
+ sizeof(struct acpi_nfit_control_region
) * 4;
1746 /* dcr-descriptor0: pmem */
1747 dcr
= nfit_buf
+ offset
;
1748 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1749 dcr
->header
.length
= offsetof(struct acpi_nfit_control_region
,
1751 dcr
->region_index
= 4+1;
1752 dcr_common_init(dcr
);
1753 dcr
->serial_number
= ~handle
[0];
1754 dcr
->code
= NFIT_FIC_BYTEN
;
1757 /* dcr-descriptor1: pmem */
1758 dcr
= nfit_buf
+ offset
+ offsetof(struct acpi_nfit_control_region
,
1760 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1761 dcr
->header
.length
= offsetof(struct acpi_nfit_control_region
,
1763 dcr
->region_index
= 5+1;
1764 dcr_common_init(dcr
);
1765 dcr
->serial_number
= ~handle
[1];
1766 dcr
->code
= NFIT_FIC_BYTEN
;
1769 /* dcr-descriptor2: pmem */
1770 dcr
= nfit_buf
+ offset
+ offsetof(struct acpi_nfit_control_region
,
1772 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1773 dcr
->header
.length
= offsetof(struct acpi_nfit_control_region
,
1775 dcr
->region_index
= 6+1;
1776 dcr_common_init(dcr
);
1777 dcr
->serial_number
= ~handle
[2];
1778 dcr
->code
= NFIT_FIC_BYTEN
;
1781 /* dcr-descriptor3: pmem */
1782 dcr
= nfit_buf
+ offset
+ offsetof(struct acpi_nfit_control_region
,
1784 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1785 dcr
->header
.length
= offsetof(struct acpi_nfit_control_region
,
1787 dcr
->region_index
= 7+1;
1788 dcr_common_init(dcr
);
1789 dcr
->serial_number
= ~handle
[3];
1790 dcr
->code
= NFIT_FIC_BYTEN
;
1793 offset
= offset
+ offsetof(struct acpi_nfit_control_region
,
1795 /* bdw0 (spa/dcr0, dimm0) */
1796 bdw
= nfit_buf
+ offset
;
1797 bdw
->header
.type
= ACPI_NFIT_TYPE_DATA_REGION
;
1798 bdw
->header
.length
= sizeof(struct acpi_nfit_data_region
);
1799 bdw
->region_index
= 0+1;
1802 bdw
->size
= BDW_SIZE
;
1803 bdw
->capacity
= DIMM_SIZE
;
1804 bdw
->start_address
= 0;
1806 /* bdw1 (spa/dcr1, dimm1) */
1807 bdw
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_data_region
);
1808 bdw
->header
.type
= ACPI_NFIT_TYPE_DATA_REGION
;
1809 bdw
->header
.length
= sizeof(struct acpi_nfit_data_region
);
1810 bdw
->region_index
= 1+1;
1813 bdw
->size
= BDW_SIZE
;
1814 bdw
->capacity
= DIMM_SIZE
;
1815 bdw
->start_address
= 0;
1817 /* bdw2 (spa/dcr2, dimm2) */
1818 bdw
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_data_region
) * 2;
1819 bdw
->header
.type
= ACPI_NFIT_TYPE_DATA_REGION
;
1820 bdw
->header
.length
= sizeof(struct acpi_nfit_data_region
);
1821 bdw
->region_index
= 2+1;
1824 bdw
->size
= BDW_SIZE
;
1825 bdw
->capacity
= DIMM_SIZE
;
1826 bdw
->start_address
= 0;
1828 /* bdw3 (spa/dcr3, dimm3) */
1829 bdw
= nfit_buf
+ offset
+ sizeof(struct acpi_nfit_data_region
) * 3;
1830 bdw
->header
.type
= ACPI_NFIT_TYPE_DATA_REGION
;
1831 bdw
->header
.length
= sizeof(struct acpi_nfit_data_region
);
1832 bdw
->region_index
= 3+1;
1835 bdw
->size
= BDW_SIZE
;
1836 bdw
->capacity
= DIMM_SIZE
;
1837 bdw
->start_address
= 0;
1839 offset
= offset
+ sizeof(struct acpi_nfit_data_region
) * 4;
1840 /* flush0 (dimm0) */
1841 flush
= nfit_buf
+ offset
;
1842 flush
->header
.type
= ACPI_NFIT_TYPE_FLUSH_ADDRESS
;
1843 flush
->header
.length
= flush_hint_size
;
1844 flush
->device_handle
= handle
[0];
1845 flush
->hint_count
= NUM_HINTS
;
1846 for (i
= 0; i
< NUM_HINTS
; i
++)
1847 flush
->hint_address
[i
] = t
->flush_dma
[0] + i
* sizeof(u64
);
1849 /* flush1 (dimm1) */
1850 flush
= nfit_buf
+ offset
+ flush_hint_size
* 1;
1851 flush
->header
.type
= ACPI_NFIT_TYPE_FLUSH_ADDRESS
;
1852 flush
->header
.length
= flush_hint_size
;
1853 flush
->device_handle
= handle
[1];
1854 flush
->hint_count
= NUM_HINTS
;
1855 for (i
= 0; i
< NUM_HINTS
; i
++)
1856 flush
->hint_address
[i
] = t
->flush_dma
[1] + i
* sizeof(u64
);
1858 /* flush2 (dimm2) */
1859 flush
= nfit_buf
+ offset
+ flush_hint_size
* 2;
1860 flush
->header
.type
= ACPI_NFIT_TYPE_FLUSH_ADDRESS
;
1861 flush
->header
.length
= flush_hint_size
;
1862 flush
->device_handle
= handle
[2];
1863 flush
->hint_count
= NUM_HINTS
;
1864 for (i
= 0; i
< NUM_HINTS
; i
++)
1865 flush
->hint_address
[i
] = t
->flush_dma
[2] + i
* sizeof(u64
);
1867 /* flush3 (dimm3) */
1868 flush
= nfit_buf
+ offset
+ flush_hint_size
* 3;
1869 flush
->header
.type
= ACPI_NFIT_TYPE_FLUSH_ADDRESS
;
1870 flush
->header
.length
= flush_hint_size
;
1871 flush
->device_handle
= handle
[3];
1872 flush
->hint_count
= NUM_HINTS
;
1873 for (i
= 0; i
< NUM_HINTS
; i
++)
1874 flush
->hint_address
[i
] = t
->flush_dma
[3] + i
* sizeof(u64
);
1876 /* platform capabilities */
1877 pcap
= nfit_buf
+ offset
+ flush_hint_size
* 4;
1878 pcap
->header
.type
= ACPI_NFIT_TYPE_CAPABILITIES
;
1879 pcap
->header
.length
= sizeof(*pcap
);
1880 pcap
->highest_capability
= 1;
1881 pcap
->capabilities
= ACPI_NFIT_CAPABILITY_CACHE_FLUSH
|
1882 ACPI_NFIT_CAPABILITY_MEM_FLUSH
;
1884 if (t
->setup_hotplug
) {
1885 offset
= offset
+ flush_hint_size
* 4 + sizeof(*pcap
);
1886 /* dcr-descriptor4: blk */
1887 dcr
= nfit_buf
+ offset
;
1888 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1889 dcr
->header
.length
= sizeof(struct acpi_nfit_control_region
);
1890 dcr
->region_index
= 8+1;
1891 dcr_common_init(dcr
);
1892 dcr
->serial_number
= ~handle
[4];
1893 dcr
->code
= NFIT_FIC_BLK
;
1895 dcr
->window_size
= DCR_SIZE
;
1896 dcr
->command_offset
= 0;
1897 dcr
->command_size
= 8;
1898 dcr
->status_offset
= 8;
1899 dcr
->status_size
= 4;
1901 offset
= offset
+ sizeof(struct acpi_nfit_control_region
);
1902 /* dcr-descriptor4: pmem */
1903 dcr
= nfit_buf
+ offset
;
1904 dcr
->header
.type
= ACPI_NFIT_TYPE_CONTROL_REGION
;
1905 dcr
->header
.length
= offsetof(struct acpi_nfit_control_region
,
1907 dcr
->region_index
= 9+1;
1908 dcr_common_init(dcr
);
1909 dcr
->serial_number
= ~handle
[4];
1910 dcr
->code
= NFIT_FIC_BYTEN
;
1913 offset
= offset
+ offsetof(struct acpi_nfit_control_region
,
1915 /* bdw4 (spa/dcr4, dimm4) */
1916 bdw
= nfit_buf
+ offset
;
1917 bdw
->header
.type
= ACPI_NFIT_TYPE_DATA_REGION
;
1918 bdw
->header
.length
= sizeof(struct acpi_nfit_data_region
);
1919 bdw
->region_index
= 8+1;
1922 bdw
->size
= BDW_SIZE
;
1923 bdw
->capacity
= DIMM_SIZE
;
1924 bdw
->start_address
= 0;
1926 offset
= offset
+ sizeof(struct acpi_nfit_data_region
);
1927 /* spa10 (dcr4) dimm4 */
1928 spa
= nfit_buf
+ offset
;
1929 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1930 spa
->header
.length
= sizeof(*spa
);
1931 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_DCR
), 16);
1932 spa
->range_index
= 10+1;
1933 spa
->address
= t
->dcr_dma
[4];
1934 spa
->length
= DCR_SIZE
;
1937 * spa11 (single-dimm interleave for hotplug, note storage
1938 * does not actually alias the related block-data-window
1941 spa
= nfit_buf
+ offset
+ sizeof(*spa
);
1942 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1943 spa
->header
.length
= sizeof(*spa
);
1944 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_PM
), 16);
1945 spa
->range_index
= 11+1;
1946 spa
->address
= t
->spa_set_dma
[2];
1947 spa
->length
= SPA0_SIZE
;
1949 /* spa12 (bdw for dcr4) dimm4 */
1950 spa
= nfit_buf
+ offset
+ sizeof(*spa
) * 2;
1951 spa
->header
.type
= ACPI_NFIT_TYPE_SYSTEM_ADDRESS
;
1952 spa
->header
.length
= sizeof(*spa
);
1953 memcpy(spa
->range_guid
, to_nfit_uuid(NFIT_SPA_BDW
), 16);
1954 spa
->range_index
= 12+1;
1955 spa
->address
= t
->dimm_dma
[4];
	spa->length = DIMM_SIZE;

	offset = offset + sizeof(*spa) * 3;
	/* mem-region14 (spa/dcr4, dimm4) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[4];
	memdev->physical_id = 4;
	memdev->region_id = 0;
	memdev->range_index = 10+1;
	memdev->region_index = 8+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region15 (spa0, dimm4) */
	memdev = nfit_buf + offset +
			sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[4];
	memdev->physical_id = 4;
	memdev->region_id = 0;
	memdev->range_index = 11+1;
	memdev->region_index = 9+1;
	memdev->region_size = SPA0_SIZE;
	memdev->region_offset = (1ULL << 48);
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;

	/* mem-region16 (spa/bdw4, dimm4) */
	memdev = nfit_buf + offset +
			sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[4];
	memdev->physical_id = 4;
	memdev->region_id = 0;
	memdev->range_index = 12+1;
	memdev->region_index = 8+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
	/* flush3 (dimm4) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[4];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[4]
			+ i * sizeof(u64);

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA0_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
	set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
	set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}
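
/*
 * nfit_test1_setup(): NFIT for the second test bus -- one flat
 * persistent-memory range with no BLK aliasing, a virtual CD region,
 * and two DIMMs whose save/restore/flush/arm and map-failed flags are
 * set so the core driver's error-flag handling gets exercised.
 */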
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset = 0;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	/* virtual cd region */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;

	offset += sizeof(*spa) * 2;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;

	offset += dcr->header.length;
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;

	/* dcr-descriptor1 */
	offset += sizeof(*memdev);
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
}
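
/*
 * Simulated BLK aperture I/O: rather than programming command/address
 * registers, reads and writes go straight to the memory backing the
 * test BDW mmio region, with a token arch_invalidate_pmem() call on
 * the read path for coverage.
 */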
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the arch_invalidate_pmem() API */
		arch_invalidate_pmem(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
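
/*
 * Stubbed ACPI support for nfit_ctl_test(): acpi_nfit_ctl() normally
 * round-trips commands through an ACPI _DSM evaluation; here the handle
 * is checked against a local sentinel and a canned acpi_object, staged
 * by setup_result(), is handed back instead.
 */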
static unsigned long nfit_ctl_handle;

union acpi_object *result;

static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
{
	if (handle != &nfit_ctl_handle)
		return ERR_PTR(-ENXIO);

	return result;
}

static int setup_result(void *buf, size_t size)
{
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->package.type = ACPI_TYPE_BUFFER;
	result->buffer.pointer = (void *) (result + 1);
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}
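
/*
 * nfit_ctl_test(): exercise acpi_nfit_ctl() command marshaling against
 * stand-in acpi_device/nvdimm objects -- config-size checkout, the
 * ars_status output-length variants, extended-status handling, and
 * clear-error -- without touching real hardware or firmware.
 */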
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_clear_error clear_err;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
				| 1UL << NFIT_CMD_ARS_INJECT_SET
				| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
				| 1UL << NFIT_CMD_ARS_INJECT_GET,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmds.clear_err);
	cmds.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.address = 512,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
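
/*
 * Platform-device probe: allocate the per-DIMM and per-SPA backing
 * buffers, build the instance's NFIT via its setup() routine, and hand
 * it to the core with acpi_nfit_init(). Instance 0 additionally replays
 * a hotplug-style table update through __acpi_nfit_notify().
 */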
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->smart = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart), GFP_KERNEL);
		nfit_test->smart_threshold = devm_kcalloc(dev, num,
				sizeof(struct nd_intel_smart_threshold),
				GFP_KERNEL);
		nfit_test->fw = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_fw), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma
				&& nfit_test->fw)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_size);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < NUM_DCR; i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
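
/*
 * Module init: install the fake lookup/_DSM hooks, then create NUM_NFITS
 * platform devices -- instance 0 carries the full multi-DIMM topology,
 * instance 1 the minimal flat-range topology -- and register the driver.
 */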
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(nfit_test_dimm)) {
		rc = PTR_ERR(nfit_test_dimm);
		goto err_register;
	}

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		badrange_init(&nfit_test->badrange);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->dcr_idx = 0;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 2;
			nfit_test->dcr_idx = NUM_DCR;
			nfit_test->num_dcr = 2;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;
		INIT_WORK(&nfit_test->work, uc_error_notify);
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			put_device(&instances[i]->pdev.dev);

	return rc;
}
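
/*
 * Module exit: undo nfit_test_init() -- drain the workqueue, unregister
 * the devices and driver, then drop the test instances and the class.
 */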
static __exit void nfit_test_exit(void)
{
	int i;

	flush_workqueue(nfit_wq);
	destroy_workqueue(nfit_wq);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");