// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

28 #include "ipu6-buttress.h"
29 #include "ipu6-platform-buttress-regs.h"
#define BOOTLOADER_STATUS_OFFSET 0x15c

#define BOOTLOADER_MAGIC_KEY 0xb00710ad

#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE

#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10

#define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC)

#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC)
#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC)
#define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC)

#define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC
#define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC
#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC)
#define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC)

#define BUTTRESS_IPC_RESET_RETRY 2000
#define BUTTRESS_CSE_IPC_RESET_RETRY 4
#define BUTTRESS_IPC_CMD_SEND_RETRY 1

#define BUTTRESS_MAX_CONSECUTIVE_IRQS 100

static const u32 ipu6_adev_irq_mask[2] = {
	BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ
};

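/*
 * The CSE IPC reset handshake below walks both sides through the ENTRY
 * (RST_PHASE1), EXIT (RST_PHASE2) and QUERY states reported in csr_in,
 * acknowledging each observed state via csr_out until the peer completes
 * RST_PHASE2 or the retry budget is exhausted.
 */
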
int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
			    struct ipu6_buttress_ipc *ipc)
{
	unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	u32 val = 0, csr_in_clr;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode");
		return 0;
	}

	mutex_lock(&b->ipc_mutex);

	/* Clear-by-1 CSR (all bits), corresponding internal states. */
	val = readl(isp->base + ipc->csr_in);
	writel(val, isp->base + ipc->csr_in);

	/* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
	writel(ENTRY, isp->base + ipc->csr_out);
	/*
	 * Clear-by-1 all CSR bits EXCEPT following
	 * bits:
	 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
	 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
	 * C. Possibly custom bits, depending on their role.
	 */
	csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
		BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
		BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;

	do {
		usleep_range(400, 500);
		val = readl(isp->base + ipc->csr_in);
		switch (val) {
		case ENTRY | EXIT:
		case ENTRY | EXIT | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
			 * 2) Set peer CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 */
			writel(ENTRY | EXIT, isp->base + ipc->csr_in);
			writel(QUERY, isp->base + ipc->csr_out);
			break;
		case ENTRY:
		case ENTRY | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 */
			writel(ENTRY | QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		case EXIT:
		case EXIT | QUERY:
			/*
			 * Clear-by-1 CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * 1) Clear incoming doorbell.
			 * 2) Clear-by-1 all CSR bits EXCEPT following
			 * bits:
			 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * C. Possibly custom bits, depending on their role.
			 * 3) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 */
			writel(EXIT, isp->base + ipc->csr_in);
			writel(0, isp->base + ipc->db0_in);
			writel(csr_in_clr, isp->base + ipc->csr_in);
			writel(EXIT, isp->base + ipc->csr_out);

			/*
			 * Read csr_in again to make sure if RST_PHASE2 is done.
			 * If csr_in is QUERY, it should be handled again.
			 */
			usleep_range(200, 300);
			val = readl(isp->base + ipc->csr_in);
			if (val & QUERY) {
				dev_dbg(&isp->pdev->dev,
					"RST_PHASE2 retry csr_in = %x\n", val);
				break;
			}
			mutex_unlock(&b->ipc_mutex);
			return 0;
		case QUERY:
			/*
			 * 1) Clear-by-1 CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 */
			writel(QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		default:
			dev_dbg_ratelimited(&isp->pdev->dev,
					    "Unexpected CSR 0x%x\n", val);
			break;
		}
	} while (retries--);

	mutex_unlock(&b->ipc_mutex);
	dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");

	return -ETIMEDOUT;
}

static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
					     struct ipu6_buttress_ipc *ipc)
{
	writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);
}

static int
ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
				struct ipu6_buttress_ipc *ipc)
{
	unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
	void __iomem *addr;
	int ret;
	u32 val;

	writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);

	addr = isp->base + ipc->csr_in;
	ret = readl_poll_timeout(addr, val, val & mask, 200,
				 BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
		ipu6_buttress_ipc_validity_close(isp, ipc);
	}

	return ret;
}

static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
				   struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
{
	if (ipc_msg)
		*ipc_msg = readl(isp->base + ipc->data0_in);
	writel(0, isp->base + ipc->db0_in);
}

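/*
 * Command flow used by ipu6_buttress_ipc_send_bulk(): each command word is
 * written to data0_out, the doorbell db0_out is rung with the BUSY bit plus
 * the command size, and completion is signalled from the ISR via
 * send_complete (EXEC_DONE) and, for commands expecting a reply,
 * recv_complete (FROM_CSE_IS_WAITING, with the response latched in recv_data).
 */
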
static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
				       struct ipu6_ipc_buttress_bulk_msg *msgs,
				       u32 size)
{
	unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
	unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	struct ipu6_buttress_ipc *ipc = &b->cse;
	u32 val;
	int ret;
	int tout;

	mutex_lock(&b->ipc_mutex);

	ret = ipu6_buttress_ipc_validity_open(isp, ipc);
	if (ret) {
		dev_err(&isp->pdev->dev, "IPC validity open failed\n");
		goto out;
	}

	tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
	rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);

	for (i = 0; i < size; i++) {
		reinit_completion(&ipc->send_complete);
		if (msgs[i].require_resp)
			reinit_completion(&ipc->recv_complete);

		dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
			msgs[i].cmd);
		writel(msgs[i].cmd, isp->base + ipc->data0_out);
		val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
		writel(val, isp->base + ipc->db0_out);

		tout = wait_for_completion_timeout(&ipc->send_complete,
						   tx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "send IPC response timeout\n");
			if (!retry--) {
				ret = -ETIMEDOUT;
				goto out;
			}

			/* Try again if CSE is not responding on first try */
			writel(0, isp->base + ipc->db0_out);
			i--;
			continue;
		}

		retry = BUTTRESS_IPC_CMD_SEND_RETRY;

		if (!msgs[i].require_resp)
			continue;

		tout = wait_for_completion_timeout(&ipc->recv_complete,
						   rx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		if (ipc->nack_mask &&
		    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
			dev_err(&isp->pdev->dev,
				"IPC NACK for cmd 0x%x\n", msgs[i].cmd);
			ret = -EIO;
			goto out;
		}

		if (ipc->recv_data != msgs[i].expected_resp) {
			dev_err(&isp->pdev->dev,
				"expected resp: 0x%x, IPC response: 0x%x\n",
				msgs[i].expected_resp, ipc->recv_data);
			ret = -EIO;
			goto out;
		}
	}

	dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");

out:
	ipu6_buttress_ipc_validity_close(isp, ipc);
	mutex_unlock(&b->ipc_mutex);
	return ret;
}

static int
ipu6_buttress_ipc_send(struct ipu6_device *isp,
		       u32 ipc_msg, u32 size, bool require_resp,
		       u32 expected_resp)
{
	struct ipu6_ipc_buttress_bulk_msg msg = {
		.cmd = ipc_msg,
		.cmd_size = size,
		.require_resp = require_resp,
		.expected_resp = expected_resp,
	};

	return ipu6_buttress_ipc_send_bulk(isp, &msg, 1);
}

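/*
 * ipu6_buttress_ipc_send() is the single-message wrapper used by
 * ipu6_buttress_authenticate() below for the BOOT_LOAD and AUTHENTICATE_RUN
 * requests, each of which expects a matching *_DONE response word.
 */
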
static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (!adev || !adev->auxdrv || !adev->auxdrv_data)
		return IRQ_NONE;

	if (adev->auxdrv_data->isr)
		ret = adev->auxdrv_data->isr(adev);

	if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
		ret = IRQ_NONE;

	return ret;
}

irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
{
	struct ipu6_device *isp = isp_ptr;
	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
	struct ipu6_buttress *b = &isp->buttress;
	u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
	irqreturn_t ret = IRQ_NONE;
	u32 disable_irqs = 0;
	u32 irq_status;
	u32 i, count = 0;
	int active;

	active = pm_runtime_get_if_active(&isp->pdev->dev);
	if (!active)
		return IRQ_NONE;

	irq_status = readl(isp->base + reg_irq_sts);
	if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) {
		if (active > 0)
			pm_runtime_put_noidle(&isp->pdev->dev);
		return IRQ_NONE;
	}

	do {
		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);

		for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
			irqreturn_t r = ipu6_buttress_call_isr(adev[i]);

			if (!(irq_status & ipu6_adev_irq_mask[i]))
				continue;

			if (r == IRQ_WAKE_THREAD) {
				ret = IRQ_WAKE_THREAD;
				disable_irqs |= ipu6_adev_irq_mask[i];
			} else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
				ret = IRQ_HANDLED;
			}
		}

		if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
			ret = IRQ_HANDLED;

		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
			ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
			complete(&b->cse.recv_complete);
		}

		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
			complete(&b->cse.send_complete);
		}

		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
		    ipu6_buttress_get_secure_mode(isp))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_SAI_VIOLATION\n");

		if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
				  BUTTRESS_ISR_PS_FATAL_MEM_ERR))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_FATAL_MEM_ERR\n");

		if (irq_status & BUTTRESS_ISR_UFI_ERROR)
			dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");

		if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
			dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
			ret = IRQ_NONE;
			goto out;
		}

		irq_status = readl(isp->base + reg_irq_sts);
	} while (irq_status);

out:
	writel(BUTTRESS_IRQS & ~disable_irqs,
	       isp->base + BUTTRESS_REG_ISR_ENABLE);

	if (active > 0)
		pm_runtime_put(&isp->pdev->dev);

	return ret;
}

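/*
 * IRQ bits handed off to a threaded handler are masked out of
 * BUTTRESS_REG_ISR_ENABLE above (disable_irqs); ipu6_buttress_isr_threaded()
 * re-enables the full BUTTRESS_IRQS set once the auxiliary drivers have run.
 */
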
irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
{
	struct ipu6_device *isp = isp_ptr;
	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
	const struct ipu6_auxdrv_data *drv_data = NULL;
	irqreturn_t ret = IRQ_NONE;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) {
		drv_data = adev[i]->auxdrv_data;
		if (!drv_data)
			continue;

		if (drv_data->wake_isr_thread &&
		    drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

	return ret;
}

int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
			bool on)
{
	struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
	u32 pwr_sts, val;
	int ret = 0;

	if (!ctrl)
		return 0;

	mutex_lock(&isp->buttress.power_mutex);

	if (!on) {
		val = 0;
		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
	} else {
		val = BUTTRESS_FREQ_CTL_START |
			FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK,
				   ctrl->ratio) |
			FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK,
				   ctrl->qos_floor) |
			BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;

		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
	}

	writel(val, isp->base + ctrl->freq_ctl);

	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
				 val, (val & ctrl->pwr_sts_mask) == pwr_sts,
				 100, BUTTRESS_POWER_TIMEOUT_US);
	if (ret)
		dev_err(&isp->pdev->dev,
			"Change power status timeout with 0x%x\n", val);

	ctrl->started = !ret && on;

	mutex_unlock(&isp->buttress.power_mutex);

	return ret;
}

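/*
 * Note: power-up programs freq_ctl with the start bit, ratio and QoS floor
 * for the island, power-down writes 0; in both cases the result is confirmed
 * by polling BUTTRESS_REG_PWR_STATE, and ctrl->started then mirrors whether
 * the island was left running.
 */
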
bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp)
{
	u32 val;

	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);

	return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
}

bool ipu6_buttress_auth_done(struct ipu6_device *isp)
{
	u32 val;

	if (!isp->secure_mode)
		return true;

	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
	val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);

	return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6);

int ipu6_buttress_reset_authentication(struct ipu6_device *isp)
{
	int ret;
	u32 val;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
		return 0;
	}

	writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
	       BUTTRESS_REG_FW_RESET_CTL);

	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
				 val & BUTTRESS_FW_RESET_CTL_DONE, 500,
				 BUTTRESS_CSE_FWRESET_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev,
			"Timed out while resetting authentication state\n");
		return ret;
	}

	dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n");
	writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
	/* leave some time for HW restore */
	usleep_range(800, 1000);

	return 0;
}

int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
			       const struct firmware *fw, struct sg_table *sgt)
{
	bool is_vmalloc = is_vmalloc_addr(fw->data);
	struct pci_dev *pdev = sys->isp->pdev;
	struct page **pages;
	const void *addr;
	unsigned long n_pages;
	unsigned int i;
	int ret;

	if (!is_vmalloc && !virt_addr_valid(fw->data))
		return -EDOM;

	n_pages = PFN_UP(fw->size);

	pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	addr = fw->data;
	for (i = 0; i < n_pages; i++) {
		struct page *p = is_vmalloc ?
			vmalloc_to_page(addr) : virt_to_page(addr);

		if (!p) {
			ret = -ENOMEM;
			goto out;
		}
		pages[i] = p;
		addr += PAGE_SIZE;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
					GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
	if (ret) {
		sg_free_table(sgt);
		goto out;
	}

	ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
	if (ret) {
		dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
		sg_free_table(sgt);
		goto out;
	}

	ipu6_dma_sync_sgtable(sys, sgt);

out:
	kfree(pages);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);

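/*
 * The firmware image ends up with two mappings: a PCI DMA mapping via
 * dma_map_sgtable() and an IPU MMU mapping via ipu6_dma_map_sgtable().
 * ipu6_buttress_unmap_fw_image() below must unwind both, in reverse order,
 * before freeing the scatterlist.
 */
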
void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
				  struct sg_table *sgt)
{
	struct pci_dev *pdev = sys->isp->pdev;

	ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
	dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
	sg_free_table(sgt);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);

int ipu6_buttress_authenticate(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;
	struct ipu6_psys_pdata *psys_pdata;
	u32 data, mask, done, fail;
	int ret;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
		return 0;
	}

	psys_pdata = isp->psys->pdata;

	mutex_lock(&b->auth_mutex);

	if (ipu6_buttress_auth_done(isp)) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Write address of FIT table to FW_SOURCE register
	 * Let's use fw address. I.e. not using FIT table yet
	 */
	data = lower_32_bits(isp->psys->pkg_dir_dma_addr);
	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);

	data = upper_32_bits(isp->psys->pkg_dir_dma_addr);
	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);

	/*
	 * Write boot_load into IU2CSEDATA0
	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as a cmd request
	 */
	dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");

	ret = ipu6_buttress_ipc_send(isp,
				     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
				     1, true,
				     BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
		goto out_unlock;
	}

	mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
	done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
	fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
				 ((data & mask) == done ||
				  (data & mask) == fail), 500,
				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
		goto out_unlock;
	}

	if ((data & mask) == fail) {
		dev_err(&isp->pdev->dev, "CSE auth failed\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
				 data, data == BOOTLOADER_MAGIC_KEY, 500,
				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n",
			data);
		goto out_unlock;
	}

	/*
	 * Write authenticate_run into IU2CSEDATA0
	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as a cmd request
	 */
	dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
	ret = ipu6_buttress_ipc_send(isp,
				     BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
				     1, true,
				     BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
		goto out_unlock;
	}

	done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
				 ((data & mask) == done ||
				  (data & mask) == fail), 500,
				 BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
		goto out_unlock;
	}

	if ((data & mask) == fail) {
		dev_err(&isp->pdev->dev, "CSE authenticate failed\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");

out_unlock:
	mutex_unlock(&b->auth_mutex);

	return ret;
}

static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp)
{
	u32 val, mask, done;
	int ret;

	mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;

	writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
	       isp->base + BUTTRESS_REG_FABRIC_CMD);

	val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
	val = FIELD_GET(mask, val);
	if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
		dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
		return -EINVAL;
	}

	done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
				 FIELD_GET(mask, val) == done, 500,
				 BUTTRESS_TSC_SYNC_TIMEOUT_US);
	if (ret)
		dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");

	return ret;
}

int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp)
{
	unsigned int i;

	for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
		u32 val;
		int ret;

		ret = ipu6_buttress_send_tsc_request(isp);
		if (ret != -ETIMEDOUT)
			return ret;

		val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
		val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
		val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET;
		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
	}

	dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6);

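/*
 * The 64-bit TSC is sampled as HI/LO/HI 32-bit reads; if the two HI reads
 * differ, the LO sample's top bit decides which HI value belongs to it
 * (rollover handling in ipu6_buttress_tsc_read() below).
 */
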
void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val)
{
	u32 tsc_hi_1, tsc_hi_2, tsc_lo;
	unsigned long flags;

	local_irq_save(flags);
	tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
	tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
	tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
	if (tsc_hi_1 == tsc_hi_2) {
		*val = (u64)tsc_hi_1 << 32 | tsc_lo;
	} else {
		/* Check if TSC has rolled over */
		if (tsc_lo & BIT(31))
			*val = (u64)tsc_hi_1 << 32 | tsc_lo;
		else
			*val = (u64)tsc_hi_2 << 32 | tsc_lo;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6);

u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp)
{
	u64 ns = ticks * 10000;

	/*
	 * Converting a TSC tick count to ns is calculated by:
	 * Example (TSC clock frequency is 19.2 MHz):
	 * ns = ticks * 1000 000 000 / 19.2 MHz
	 *    = ticks * 1000 000 000 / 19200000 Hz
	 *    = ticks * 10000 / 192 ns
	 */
	return div_u64(ns, isp->buttress.ref_clk);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6);

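/*
 * Worked example, assuming ref_clk was latched as 192 (19.2 MHz) in
 * ipu6_buttress_init(): 19200000 ticks * 10000 / 192 = 1000000000 ns, i.e.
 * one second of TSC ticks converts to one second of nanoseconds.
 */
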
void ipu6_buttress_restore(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;

	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
	writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
}

int ipu6_buttress_init(struct ipu6_device *isp)
{
	int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	u32 val;

	mutex_init(&b->power_mutex);
	mutex_init(&b->auth_mutex);
	mutex_init(&b->cons_mutex);
	mutex_init(&b->ipc_mutex);
	init_completion(&b->cse.send_complete);
	init_completion(&b->cse.recv_complete);

	b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
	b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
	b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
	b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
	b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
	b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
	b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
	b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;

	INIT_LIST_HEAD(&b->constraints);

	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
	dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
		 isp->secure_mode ? "secure" : "non-secure",
		 readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
		 readl(isp->base + BUTTRESS_REG_CAMERA_MASK));

	b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

	/* get ref_clk frequency by reading the indication in btrs control */
	val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
	val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val);

	/* ref_clk is stored in 100 kHz units */
	switch (val) {
	case 0x0:
		b->ref_clk = 240;
		break;
	case 0x1:
		b->ref_clk = 192;
		break;
	case 0x2:
		b->ref_clk = 384;
		break;
	default:
		dev_warn(&isp->pdev->dev,
			 "Unsupported ref clock, use 19.2 MHz by default.\n");
		b->ref_clk = 192;
		break;
	}

	/* Retry a couple of times in case CSE initialization is delayed */
	do {
		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
		if (ret) {
			dev_warn(&isp->pdev->dev,
				 "IPC reset protocol failed, retrying\n");
		} else {
			dev_dbg(&isp->pdev->dev, "IPC reset done\n");
			return 0;
		}
	} while (ipc_reset_retry--);

	dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");

	mutex_destroy(&b->power_mutex);
	mutex_destroy(&b->auth_mutex);
	mutex_destroy(&b->cons_mutex);
	mutex_destroy(&b->ipc_mutex);

	return ret;
}

void ipu6_buttress_exit(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;

	writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);

	mutex_destroy(&b->power_mutex);
	mutex_destroy(&b->auth_mutex);
	mutex_destroy(&b->cons_mutex);
	mutex_destroy(&b->ipc_mutex);
}