// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <linux/memregion.h>
#include "intel.h"
#include "nfit.h"

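/*
 * The 'firmware_activate_noidle' attribute controls how the bus-scoped
 * activate command quiesces devices: when set, the command is issued
 * with ND_INTEL_BUS_FWA_IODEV_OS_IDLE instead of the default
 * ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE.
 */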
static ssize_t firmware_activate_noidle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
}

static ssize_t firmware_activate_noidle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	ssize_t rc;
	bool val;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;
	if (val != acpi_desc->fwa_noidle)
		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
	acpi_desc->fwa_noidle = val;
	return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);

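/*
 * Firmware activation is only reported as supported when the Intel
 * bus-family DSM mask matches the full
 * NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK command set.
 */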
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}

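/*
 * The handlers below all follow the same pattern: wrap the Intel-defined
 * payload in an nd_cmd_pkg, submit it as an ND_CMD_CALL via nvdimm_ctl(),
 * and translate the returned status into generic libnvdimm semantics.
 */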
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	return 0;
}

static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	return 0;
}

static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;

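/*
 * Bus-scoped firmware activate support: retrieve the platform-level
 * activation payload (state, timeouts, and quiesce capability flags).
 */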
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_intel_bus_fw_activate_businfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate_businfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
		},
	};
	int rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);
	*info = nd_cmd.cmd;
	return rc;
}

/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}

static enum nvdimm_fwa_capability intel_bus_fwa_capability(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
		return acpi_desc->fwa_cap;

	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
		return acpi_desc->fwa_cap;

	return NVDIMM_FWA_CAP_INVALID;
}

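/*
 * Issue the bus-scoped activate command. Activation is only attempted
 * from the armed (or arm-overflow) state, and the cached state is
 * unconditionally invalidated afterwards since results must be read
 * back from each DIMM individually.
 */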
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}

static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;

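/* Per-DIMM firmware activate support: retrieve a DIMM's state and last result */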
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
		struct nd_intel_fw_activate_dimminfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_dimminfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_dimminfo),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_dimminfo),
		},
	};
	int rc;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	*info = nd_cmd.cmd;
	return rc;
}

static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}

static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;

	if (nfit_mem->fwa_count == acpi_desc->fwa_count
			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
		return nfit_mem->fwa_result;

	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
		return nfit_mem->fwa_result;

	return NVDIMM_FWA_RESULT_INVALID;
}

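/*
 * Arm or disarm a DIMM for firmware activation. A request that matches
 * the current state is a nop; otherwise the cached bus and DIMM state
 * are invalidated before issuing the DSM.
 */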
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_arm cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_arm),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_arm),
		},
		.cmd = {
			.activate_arm = arm == NVDIMM_FWA_ARM
				? ND_INTEL_DIMM_FWA_ARM
				: ND_INTEL_DIMM_FWA_DISARM,
		},
	};
	int rc;

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);
	return rc;
}

static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;