// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MAX10 Board Management Controller Secure Update Driver
 *
 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
 *
 */
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct m10bmc_sec;

struct m10bmc_sec_ops {
	int (*rsu_status)(struct m10bmc_sec *sec);
};

struct m10bmc_sec {
	struct device *dev;
	struct intel_m10bmc *m10bmc;
	struct fw_upload *fwl;
	char *fw_name;
	u32 fw_name_id;
	bool cancel_request;
	const struct m10bmc_sec_ops *ops;
};

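/*
 * Allocates a unique ID per driver instance; the ID becomes part of the
 * "secure-updateN" name passed to the firmware-upload core in probe().
 */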
static DEFINE_XARRAY_ALLOC(fw_upload_xa);

/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32
#define REH_SHA384_SIZE		48
#define REH_MAGIC		GENMASK(15, 0)
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)

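/*
 * m10bmc_sec_write() and m10bmc_sec_read() transfer data to and from the BMC.
 * When dedicated flash bulk operations are available they are used directly;
 * otherwise the transfer goes through the regmap, with any tail bytes that do
 * not fill a whole register stride handled separately.
 */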
static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 write_count = size / stride;
	u32 leftover_offset = write_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp = 0;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
				buf + offset, write_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
	if (leftover_size) {
		memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
		ret = regmap_write(m10bmc->regmap,
				   M10BMC_STAGING_BASE + offset + leftover_offset,
				   leftover_tmp);
		if (ret)
			return ret;
	}

	return 0;
}

static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 read_count = size / stride;
	u32 leftover_offset = read_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
	if (leftover_size) {
		ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
		if (ret)
			return ret;
		memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
	}

	return 0;
}

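/*
 * Reads the programmed root entry hash and renders it as a hex string. The
 * magic word encodes both a per-region magic value and the hash length in
 * bits (SHA-256 or SHA-384).
 */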
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
		     u32 prog_addr, u32 reh_addr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	int sha_num_bytes, i, ret, cnt = 0;
	u8 hash[REH_SHA384_SIZE];
	u32 magic;

	ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
	if (ret)
		return ret;

	if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
		return sysfs_emit(buf, "hash not programmed\n");

	sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
	if (sha_num_bytes != REH_SHA256_SIZE &&
	    sha_num_bytes != REH_SHA384_SIZE) {
		dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
			sha_num_bytes);
		return -EINVAL;
	}

	ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
	if (ret) {
		dev_err(dev, "failed to read root entry hash\n");
		return ret;
	}

	for (i = 0; i < sha_num_bytes; i++)
		cnt += sprintf(buf + cnt, "%02x", hash[i]);
	cnt += sprintf(buf + cnt, "\n");

	return cnt;
}

#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
	return show_root_entry_hash(dev, csr_map->_name##_magic, \
				    csr_map->_name##_prog_addr, \
				    csr_map->_name##_reh_addr, \
				    buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc);
DEVICE_ATTR_SEC_REH_RO(sr);
DEVICE_ATTR_SEC_REH_RO(pr);

#define CSK_BIT_LEN		128U
#define CSK_32ARRAY_SIZE	DIV_ROUND_UP(CSK_BIT_LEN, 32)

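/*
 * The hardware stores the code-signing-key (CSK) cancellation vector with
 * inverted polarity (a cleared bit marks a canceled key), so the bitmap is
 * complemented before the canceled key IDs are printed.
 */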
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
	unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
	__le32 csk_le32[CSK_32ARRAY_SIZE];
	u32 csk32[CSK_32ARRAY_SIZE];
	int ret;

	ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
	if (ret) {
		dev_err(sec->dev, "failed to read CSK vector\n");
		return ret;
	}

	for (i = 0; i < CSK_32ARRAY_SIZE; i++)
		csk32[i] = le32_to_cpu(csk_le32[i]);

	bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
	bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}

#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
	return show_canceled_csk(dev, \
				 csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
				 buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)

#define CSK_VEC_OFFSET 0x34

DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);

#define FLASH_COUNT_SIZE 4096	/* count stored as inverted bit vector */

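/* The flash update count is the number of zero bits in the inverted vector. */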
static ssize_t flash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned int num_bits;
	u8 *flash_buf;
	int cnt, ret;

	num_bits = FLASH_COUNT_SIZE * 8;

	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
	if (!flash_buf)
		return -ENOMEM;

	ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
			      FLASH_COUNT_SIZE);
	if (ret) {
		dev_err(sec->dev, "failed to read flash count\n");
		goto exit_free;
	}
	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);

exit_free:
	kfree(flash_buf);

	return ret ? : sysfs_emit(buf, "%u\n", cnt);
}

static DEVICE_ATTR_RO(flash_count);

static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};

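/* Log the doorbell and, when it is readable, the RSU authentication result. */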
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;

	dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);

	if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
		dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}

static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return ret;

	return FIELD_GET(DRBL_RSU_STATUS, doorbell);
}

static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
	if (ret)
		return ret;

	return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
}

static bool rsu_status_ok(u32 status)
{
	return (status == RSU_STAT_NORMAL ||
		status == RSU_STAT_NIOS_OK ||
		status == RSU_STAT_USER_OK ||
		status == RSU_STAT_FACTORY_OK);
}

static bool rsu_progress_done(u32 progress)
{
	return (progress == RSU_PROG_IDLE ||
		progress == RSU_PROG_RSU_DONE);
}

static bool rsu_progress_busy(u32 progress)
{
	return (progress == RSU_PROG_AUTHENTICATING ||
		progress == RSU_PROG_COPYING ||
		progress == RSU_PROG_UPDATE_CANCEL ||
		progress == RSU_PROG_PROGRAM_KEY_HASH);
}

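/*
 * The RSU progress field always lives in the doorbell register, but the
 * status word is device-specific (doorbell on N3000/D5005, auth_result on
 * N6000), so it is fetched through sec->ops->rsu_status().
 */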
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
				      u32 *progress, u32 *status)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
	if (ret)
		return ret;

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return ret;

	*status = ret;
	*progress = rsu_prog(*doorbell_reg);

	return 0;
}

static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (!rsu_progress_done(rsu_prog(doorbell))) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_BUSY;
	}

	return FW_UPLOAD_ERR_NONE;
}

static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
	if (doorbell_reg & DRBL_RSU_REQUEST)
		return false;

	if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
		return true;

	if (!rsu_progress_done(progress))
		return true;

	return false;
}

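/*
 * Trigger an update by setting DRBL_RSU_REQUEST, then poll until the BMC
 * firmware acknowledges the request or reports an erase failure or flash
 * wearout.
 */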
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, progress, status;
	int ret, err;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				     DRBL_RSU_REQUEST |
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = read_poll_timeout(m10bmc_sec_progress_status, err,
				err < 0 || rsu_start_done(doorbell_reg, progress, status),
				NIOS_HANDSHAKE_INTERVAL_US,
				NIOS_HANDSHAKE_TIMEOUT_US,
				false,
				sec, &doorbell_reg, &progress, &status);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (err) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	if (status == RSU_STAT_WEAROUT) {
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

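/*
 * Tell the BMC that the staged image is complete and wait for it to leave
 * the READY phase, then verify that the resulting RSU status is OK.
 */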
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, status;
	int ret;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_HOST_STATUS,
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       csr_map->base + csr_map->doorbell,
				       doorbell_reg,
				       rsu_prog(doorbell_reg) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return FW_UPLOAD_ERR_HW_ERROR;
	status = ret;

	if (!rsu_status_ok(status)) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
	u32 progress, status;

	if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
		return -EIO;

	if (!rsu_status_ok(status))
		return -EINVAL;

	if (rsu_progress_done(progress))
		return 0;

	if (rsu_progress_busy(progress))
		return -EAGAIN;

	return -EINVAL;
}

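/* A cancellation request is only honored while the BMC is in the READY phase. */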
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (rsu_prog(doorbell) != RSU_PROG_READY)
		return FW_UPLOAD_ERR_BUSY;

	ret = m10bmc_sys_update_bits(sec->m10bmc, csr_map->doorbell,
				     DRBL_HOST_STATUS,
				     FIELD_PREP(DRBL_HOST_STATUS,
						HOST_STATUS_ABORT_RSU));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	return FW_UPLOAD_ERR_CANCELED;
}

static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
					     const u8 *data, u32 size)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 ret;

	sec->cancel_request = false;

	if (!size || size > csr_map->staging_size)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	if (sec->m10bmc->flash_bulk_ops)
		if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
			return FW_UPLOAD_ERR_BUSY;

	ret = rsu_check_idle(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);

	ret = rsu_update_init(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto fw_state_exit;

	ret = rsu_prog_ready(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto fw_state_exit;

	if (sec->cancel_request) {
		ret = rsu_cancel(sec);
		goto fw_state_exit;
	}

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_WRITE);

	return FW_UPLOAD_ERR_NONE;

fw_state_exit:
	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);

unlock_flash:
	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
	return ret;
}

#define WRITE_BLOCK_SIZE 0x4000	/* Default write-block size is 0x4000 bytes */

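/*
 * Called repeatedly by the firmware-upload core; each call stages at most one
 * WRITE_BLOCK_SIZE chunk and reports how much was consumed via *written.
 */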
static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
					      u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	u32 blk_size, doorbell;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	ret = m10bmc_sec_write(sec, data, offset, blk_size);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}

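/*
 * Called once all image data has been written: signal WRITE_DONE, then poll
 * the doorbell until the BMC reports the update as done or failed.
 */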
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PROGRAM);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	if (ret == -EAGAIN) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}

/*
 * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
 * All other functions are called sequentially in a single thread. To avoid
 * contention on register accesses, m10bmc_sec_cancel() must only update
 * the cancel_request flag. Other functions will check this flag and handle
 * the cancel request synchronously.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	sec->cancel_request = true;
}

static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	(void)rsu_cancel(sec);

	m10bmc_fw_state_set(sec->m10bmc, M10BMC_FW_STATE_NORMAL);

	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}

static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_fw_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};

static const struct m10bmc_sec_ops m10sec_n3000_ops = {
	.rsu_status = m10bmc_sec_n3000_rsu_status,
};

static const struct m10bmc_sec_ops m10sec_n6000_ops = {
	.rsu_status = m10bmc_sec_n6000_rsu_status,
};

#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
	char buf[SEC_UPDATE_LEN_MAX];
	struct m10bmc_sec *sec;
	struct fw_upload *fwl;
	unsigned int len;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	sec->dev = &pdev->dev;
	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
	sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
	dev_set_drvdata(&pdev->dev, sec);

	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
		       xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
			sec->fw_name_id);
	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
	if (!sec->fw_name) {
		ret = -ENOMEM;
		goto fw_name_fail;
	}

	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
				       &m10bmc_ops, sec);
	if (IS_ERR(fwl)) {
		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
		ret = PTR_ERR(fwl);
		goto fw_uploader_fail;
	}

	sec->fwl = fwl;
	return 0;

fw_uploader_fail:
	kfree(sec->fw_name);
fw_name_fail:
	xa_erase(&fw_upload_xa, sec->fw_name_id);
	return ret;
}

static void m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);
	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);
}

static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "d5005bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "n6000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);

static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");