// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-dma.h"
#include "ipu6-buttress.h"
#include "ipu6-platform-buttress-regs.h"

#define BOOTLOADER_STATUS_OFFSET		0x15c

#define BOOTLOADER_MAGIC_KEY			0xb00710ad

#define ENTRY	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
#define EXIT	BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
#define QUERY	BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE

#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX	10

#define BUTTRESS_POWER_TIMEOUT_US		(200 * USEC_PER_MSEC)

#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US	(5 * USEC_PER_SEC)
#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US	(10 * USEC_PER_SEC)
#define BUTTRESS_CSE_FWRESET_TIMEOUT_US		(100 * USEC_PER_MSEC)

#define BUTTRESS_IPC_TX_TIMEOUT_MS		MSEC_PER_SEC
#define BUTTRESS_IPC_RX_TIMEOUT_MS		MSEC_PER_SEC
#define BUTTRESS_IPC_VALIDITY_TIMEOUT_US	(1 * USEC_PER_SEC)
#define BUTTRESS_TSC_SYNC_TIMEOUT_US		(5 * USEC_PER_MSEC)

#define BUTTRESS_IPC_RESET_RETRY		2000
#define BUTTRESS_CSE_IPC_RESET_RETRY		4
#define BUTTRESS_IPC_CMD_SEND_RETRY		1

#define BUTTRESS_MAX_CONSECUTIVE_IRQS		100

static const u32 ipu6_adev_irq_mask[2] = {
	BUTTRESS_ISR_IS_IRQ,
	BUTTRESS_ISR_PS_IRQ
};

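/*
 * Reset the IPC link between the IPU and the CSE security engine.
 * The reset is a multi-phase handshake driven through the csr_in/
 * csr_out registers: each phase (ENTRY, EXIT, QUERY) is acknowledged
 * by the peer, and the loop keeps polling csr_in until the EXIT phase
 * completes or BUTTRESS_IPC_RESET_RETRY iterations have elapsed.
 * Only needed in secure mode; non-secure mode returns early.
 */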
int ipu6_buttress_ipc_reset(struct ipu6_device *isp,
			    struct ipu6_buttress_ipc *ipc)
{
	unsigned int retries = BUTTRESS_IPC_RESET_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	u32 val = 0, csr_in_clr;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode");
		return 0;
	}

	mutex_lock(&b->ipc_mutex);

	/* Clear-by-1 CSR (all bits), corresponding internal states. */
	val = readl(isp->base + ipc->csr_in);
	writel(val, isp->base + ipc->csr_in);

	/* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
	writel(ENTRY, isp->base + ipc->csr_out);
	/*
	 * Clear-by-1 all CSR bits EXCEPT following
	 * bits:
	 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
	 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
	 * C. Possibly custom bits, depending on
	 * their role.
	 */
	csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
		BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
		BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;

	do {
		usleep_range(400, 500);
		val = readl(isp->base + ipc->csr_in);
		switch (val) {
		case ENTRY | EXIT:
		case ENTRY | EXIT | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
			 * 2) Set peer CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 */
			writel(ENTRY | EXIT, isp->base + ipc->csr_in);
			writel(QUERY, isp->base + ipc->csr_out);
			break;
		case ENTRY:
		case ENTRY | QUERY:
			/*
			 * 1) Clear-by-1 CSR bits
			 * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 */
			writel(ENTRY | QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		case EXIT:
		case EXIT | QUERY:
			/*
			 * Clear-by-1 CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * 1) Clear incoming doorbell.
			 * 2) Clear-by-1 all CSR bits EXCEPT following
			 * bits:
			 * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
			 * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 * C. Possibly custom bits, depending on
			 * their role.
			 * 3) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
			 */
			writel(EXIT, isp->base + ipc->csr_in);
			writel(0, isp->base + ipc->db0_in);
			writel(csr_in_clr, isp->base + ipc->csr_in);
			writel(EXIT, isp->base + ipc->csr_out);

			/*
			 * Read csr_in again to make sure if RST_PHASE2 is done.
			 * If csr_in is QUERY, it should be handled again.
			 */
			usleep_range(200, 300);
			val = readl(isp->base + ipc->csr_in);
			if (val & QUERY) {
				dev_dbg(&isp->pdev->dev,
					"RST_PHASE2 retry csr_in = %x\n", val);
				break;
			}
			mutex_unlock(&b->ipc_mutex);
			return 0;
		case QUERY:
			/*
			 * 1) Clear-by-1 CSR bit
			 * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
			 * 2) Set peer CSR bit
			 * IPC_PEER_COMP_ACTIONS_RST_PHASE1
			 */
			writel(QUERY, isp->base + ipc->csr_in);
			writel(ENTRY, isp->base + ipc->csr_out);
			break;
		default:
			dev_dbg_ratelimited(&isp->pdev->dev,
					    "Unexpected CSR 0x%x\n", val);
			break;
		}
	} while (retries--);

	mutex_unlock(&b->ipc_mutex);
	dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");

	return -ETIMEDOUT;
}

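/*
 * The register-validity handshake brackets every bulk IPC exchange:
 * open asserts REG_VALID_REQ towards the CSE and polls for the ack,
 * close deasserts the request again.
 */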
static void ipu6_buttress_ipc_validity_close(struct ipu6_device *isp,
					     struct ipu6_buttress_ipc *ipc)
{
	writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);
}

static int
ipu6_buttress_ipc_validity_open(struct ipu6_device *isp,
				struct ipu6_buttress_ipc *ipc)
{
	unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
	void __iomem *addr;
	int ret;
	u32 val;

	writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
	       isp->base + ipc->csr_out);

	addr = isp->base + ipc->csr_in;
	ret = readl_poll_timeout(addr, val, val & mask, 200,
				 BUTTRESS_IPC_VALIDITY_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
		ipu6_buttress_ipc_validity_close(isp, ipc);
	}

	return ret;
}

static void ipu6_buttress_ipc_recv(struct ipu6_device *isp,
				   struct ipu6_buttress_ipc *ipc, u32 *ipc_msg)
{
	if (ipc_msg)
		*ipc_msg = readl(isp->base + ipc->data0_in);
	writel(0, isp->base + ipc->db0_in);
}

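/*
 * Send a batch of IPC commands to the CSE. For each message the
 * command word is written to data0_out and the doorbell is rung with
 * the BUSY bit set; completion of the send is signalled from the ISR
 * via send_complete, and an optional response is awaited on
 * recv_complete and checked against expected_resp and the NACK mask.
 * A send timeout is retried up to BUTTRESS_IPC_CMD_SEND_RETRY times.
 */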
static int ipu6_buttress_ipc_send_bulk(struct ipu6_device *isp,
				       struct ipu6_ipc_buttress_bulk_msg *msgs,
				       u32 size)
{
	unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
	unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	struct ipu6_buttress_ipc *ipc = &b->cse;
	u32 val;
	int ret;
	int tout;

	mutex_lock(&b->ipc_mutex);

	ret = ipu6_buttress_ipc_validity_open(isp, ipc);
	if (ret) {
		dev_err(&isp->pdev->dev, "IPC validity open failed\n");
		goto out;
	}

	tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS);
	rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS);

	for (i = 0; i < size; i++) {
		reinit_completion(&ipc->send_complete);
		if (msgs[i].require_resp)
			reinit_completion(&ipc->recv_complete);

		dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
			msgs[i].cmd);
		writel(msgs[i].cmd, isp->base + ipc->data0_out);
		val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
		writel(val, isp->base + ipc->db0_out);

		tout = wait_for_completion_timeout(&ipc->send_complete,
						   tx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "send IPC response timeout\n");
			if (!retry--) {
				ret = -ETIMEDOUT;
				goto out;
			}

			/* Try again if CSE is not responding on first try */
			writel(0, isp->base + ipc->db0_out);
			i--;
			continue;
		}

		retry = BUTTRESS_IPC_CMD_SEND_RETRY;

		if (!msgs[i].require_resp)
			continue;

		tout = wait_for_completion_timeout(&ipc->recv_complete,
						   rx_timeout_jiffies);
		if (!tout) {
			dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}

		if (ipc->nack_mask &&
		    (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
			dev_err(&isp->pdev->dev,
				"IPC NACK for cmd 0x%x\n", msgs[i].cmd);
			ret = -EIO;
			goto out;
		}

		if (ipc->recv_data != msgs[i].expected_resp) {
			dev_err(&isp->pdev->dev,
				"expected resp: 0x%x, IPC response: 0x%x\n",
				msgs[i].expected_resp, ipc->recv_data);
			ret = -EIO;
			goto out;
		}
	}

	dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");

out:
	ipu6_buttress_ipc_validity_close(isp, ipc);
	mutex_unlock(&b->ipc_mutex);
	return ret;
}

static int
ipu6_buttress_ipc_send(struct ipu6_device *isp,
		       u32 ipc_msg, u32 size, bool require_resp,
		       u32 expected_resp)
{
	struct ipu6_ipc_buttress_bulk_msg msg = {
		.cmd = ipc_msg,
		.cmd_size = size,
		.require_resp = require_resp,
		.expected_resp = expected_resp,
	};

	return ipu6_buttress_ipc_send_bulk(isp, &msg, 1);
}

static irqreturn_t ipu6_buttress_call_isr(struct ipu6_bus_device *adev)
{
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (!adev || !adev->auxdrv || !adev->auxdrv_data)
		return IRQ_NONE;

	if (adev->auxdrv_data->isr)
		ret = adev->auxdrv_data->isr(adev);

	if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded)
		ret = IRQ_NONE;

	return ret;
}

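/*
 * Hard-IRQ buttress handler: acknowledge and dispatch IS/PS interrupts
 * to the auxiliary ISYS/PSYS drivers, service the CSE IPC doorbell and
 * completion bits, log error conditions, and keep re-reading the
 * status register until it is clear (bounded by
 * BUTTRESS_MAX_CONSECUTIVE_IRQS to avoid an interrupt storm).
 */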
irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr)
{
	struct ipu6_device *isp = isp_ptr;
	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
	struct ipu6_buttress *b = &isp->buttress;
	u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
	irqreturn_t ret = IRQ_NONE;
	u32 disable_irqs = 0;
	u32 irq_status;
	u32 i, count = 0;
	int active;

	active = pm_runtime_get_if_active(&isp->pdev->dev);
	if (!active)
		return IRQ_NONE;

	irq_status = readl(isp->base + reg_irq_sts);
	if (irq_status == 0 || WARN_ON_ONCE(irq_status == 0xffffffffu)) {
		if (active > 0)
			pm_runtime_put_noidle(&isp->pdev->dev);
		return IRQ_NONE;
	}

	do {
		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);

		for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask); i++) {
			irqreturn_t r = ipu6_buttress_call_isr(adev[i]);

			if (!(irq_status & ipu6_adev_irq_mask[i]))
				continue;

			if (r == IRQ_WAKE_THREAD) {
				ret = IRQ_WAKE_THREAD;
				disable_irqs |= ipu6_adev_irq_mask[i];
			} else if (ret == IRQ_NONE && r == IRQ_HANDLED) {
				ret = IRQ_HANDLED;
			}
		}

		if ((irq_status & BUTTRESS_EVENT) && ret == IRQ_NONE)
			ret = IRQ_HANDLED;

		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
			ipu6_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
			complete(&b->cse.recv_complete);
		}

		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
			dev_dbg(&isp->pdev->dev,
				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
			complete(&b->cse.send_complete);
		}

		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
		    ipu6_buttress_get_secure_mode(isp))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_SAI_VIOLATION\n");

		if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR |
				  BUTTRESS_ISR_PS_FATAL_MEM_ERR))
			dev_err(&isp->pdev->dev,
				"BUTTRESS_ISR_FATAL_MEM_ERR\n");

		if (irq_status & BUTTRESS_ISR_UFI_ERROR)
			dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n");

		if (++count == BUTTRESS_MAX_CONSECUTIVE_IRQS) {
			dev_err(&isp->pdev->dev, "too many consecutive IRQs\n");
			ret = IRQ_NONE;
			break;
		}

		irq_status = readl(isp->base + reg_irq_sts);
	} while (irq_status);

	if (disable_irqs)
		writel(BUTTRESS_IRQS & ~disable_irqs,
		       isp->base + BUTTRESS_REG_ISR_ENABLE);

	if (active > 0)
		pm_runtime_put(&isp->pdev->dev);

	return ret;
}

irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
{
	struct ipu6_device *isp = isp_ptr;
	struct ipu6_bus_device *adev[] = { isp->isys, isp->psys };
	const struct ipu6_auxdrv_data *drv_data = NULL;
	irqreturn_t ret = IRQ_NONE;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ipu6_adev_irq_mask) && adev[i]; i++) {
		drv_data = adev[i]->auxdrv_data;
		if (!drv_data)
			continue;

		if (drv_data->wake_isr_thread &&
		    drv_data->isr_threaded(adev[i]) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

	return ret;
}

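/*
 * Power an IPU sub-block (ISYS or PSYS) up or down through its
 * frequency control register and wait for BUTTRESS_REG_PWR_STATE to
 * report the requested power status.
 */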
int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
			bool on)
{
	struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
	u32 pwr_sts, val;
	int ret;

	if (!ctrl)
		return 0;

	mutex_lock(&isp->buttress.power_mutex);

	if (!on) {
		val = 0;
		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
	} else {
		val = BUTTRESS_FREQ_CTL_START |
			FIELD_PREP(BUTTRESS_FREQ_CTL_RATIO_MASK,
				   ctrl->ratio) |
			FIELD_PREP(BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK,
				   ctrl->qos_floor) |
			BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;

		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
	}

	writel(val, isp->base + ctrl->freq_ctl);

	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
				 val, (val & ctrl->pwr_sts_mask) == pwr_sts,
				 100, BUTTRESS_POWER_TIMEOUT_US);
	if (ret)
		dev_err(&isp->pdev->dev,
			"Change power status timeout with 0x%x\n", val);

	ctrl->started = !ret && on;

	mutex_unlock(&isp->buttress.power_mutex);

	return ret;
}

bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp)
{
	u32 val;

	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);

	return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
}

bool ipu6_buttress_auth_done(struct ipu6_device *isp)
{
	u32 val;

	if (!isp->secure_mode)
		return true;

	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
	val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val);

	return val == BUTTRESS_SECURITY_CTL_AUTH_DONE;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_auth_done, INTEL_IPU6);

int ipu6_buttress_reset_authentication(struct ipu6_device *isp)
{
	int ret;
	u32 val;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
		return 0;
	}

	writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
	       BUTTRESS_REG_FW_RESET_CTL);

	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
				 val & BUTTRESS_FW_RESET_CTL_DONE, 500,
				 BUTTRESS_CSE_FWRESET_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev,
			"Timed out while resetting authentication state\n");
		return ret;
	}

	dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n");
	writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
	/* leave some time for HW restore */
	usleep_range(800, 1000);

	return 0;
}

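/*
 * Build a scatterlist for the firmware image and map it both for the
 * PCI device and through the IPU MMU (ipu6_dma_map_sgtable). The
 * firmware data may be vmalloc'ed or physically contiguous; anything
 * else is rejected with -EDOM.
 */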
int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
			       const struct firmware *fw, struct sg_table *sgt)
{
	bool is_vmalloc = is_vmalloc_addr(fw->data);
	struct pci_dev *pdev = sys->isp->pdev;
	struct page **pages;
	const void *addr;
	unsigned long n_pages;
	unsigned int i;
	int ret;

	if (!is_vmalloc && !virt_addr_valid(fw->data))
		return -EDOM;

	n_pages = PFN_UP(fw->size);

	pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	addr = fw->data;
	for (i = 0; i < n_pages; i++) {
		struct page *p = is_vmalloc ?
			vmalloc_to_page(addr) : virt_to_page(addr);

		if (!p) {
			ret = -ENOMEM;
			goto out;
		}
		pages[i] = p;
		addr += PAGE_SIZE;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
					GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = dma_map_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
	if (ret) {
		sg_free_table(sgt);
		goto out;
	}

	ret = ipu6_dma_map_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
	if (ret) {
		dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
		sg_free_table(sgt);
		goto out;
	}

	ipu6_dma_sync_sgtable(sys, sgt);

out:
	kfree(pages);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_map_fw_image, INTEL_IPU6);

void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
				  struct sg_table *sgt)
{
	struct pci_dev *pdev = sys->isp->pdev;

	ipu6_dma_unmap_sgtable(sys, sgt, DMA_TO_DEVICE, 0);
	dma_unmap_sgtable(&pdev->dev, sgt, DMA_TO_DEVICE, 0);
	sg_free_table(sgt);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_unmap_fw_image, INTEL_IPU6);

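/*
 * Firmware authentication with the CSE (secure mode only): program the
 * package directory address into the FW_SOURCE registers, send
 * BOOT_LOAD over IPC and wait for the "setup done" status plus the
 * bootloader magic key, then send AUTHENTICATE_RUN and wait for
 * AUTH_DONE. Any reported failure state aborts with -EINVAL.
 */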
int ipu6_buttress_authenticate(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;
	struct ipu6_psys_pdata *psys_pdata;
	u32 data, mask, done, fail;
	int ret;

	if (!isp->secure_mode) {
		dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n");
		return 0;
	}

	psys_pdata = isp->psys->pdata;

	mutex_lock(&b->auth_mutex);

	if (ipu6_buttress_auth_done(isp)) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Write address of FIT table to FW_SOURCE register
	 * Let's use fw address. I.e. not using FIT table yet
	 */
	data = lower_32_bits(isp->psys->pkg_dir_dma_addr);
	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);

	data = upper_32_bits(isp->psys->pkg_dir_dma_addr);
	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);

	/*
	 * Write boot_load into IU2CSEDATA0
	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as
	 */
	dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");

	ret = ipu6_buttress_ipc_send(isp,
				     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
				     1, true,
				     BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
		goto out_unlock;
	}

	mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
	done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
	fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
				 ((data & mask) == done ||
				  (data & mask) == fail), 500,
				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
		goto out_unlock;
	}

	if ((data & mask) == fail) {
		dev_err(&isp->pdev->dev, "CSE auth failed\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
				 data, data == BOOTLOADER_MAGIC_KEY, 500,
				 BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n",
			data);
		goto out_unlock;
	}

	/*
	 * Write authenticate_run into IU2CSEDATA0
	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as
	 */
	dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
	ret = ipu6_buttress_ipc_send(isp,
				     BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
				     1, true,
				     BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
		goto out_unlock;
	}

	done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
				 ((data & mask) == done ||
				  (data & mask) == fail), 500,
				 BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US);
	if (ret) {
		dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
		goto out_unlock;
	}

	if ((data & mask) == fail) {
		dev_err(&isp->pdev->dev, "CSE authenticate failed\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");

out_unlock:
	mutex_unlock(&b->auth_mutex);

	return ret;
}

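/*
 * TSC synchronization: issue a START_TSC_SYNC fabric command and poll
 * the handshake status in BUTTRESS_REG_PWR_STATE until it reports
 * done. On timeout the caller toggles BUTTRESS_TSW_CTL_SOFT_RESET and
 * retries, up to BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX times.
 */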
static int ipu6_buttress_send_tsc_request(struct ipu6_device *isp)
{
	u32 val, mask, done;
	int ret;

	mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;

	writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
	       isp->base + BUTTRESS_REG_FABRIC_CMD);

	val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
	val = FIELD_GET(mask, val);
	if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
		dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
		return -EINVAL;
	}

	done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
	ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
				 FIELD_GET(mask, val) == done, 500,
				 BUTTRESS_TSC_SYNC_TIMEOUT_US);
	if (ret)
		dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");

	return ret;
}

int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp)
{
	unsigned int i;

	for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
		u32 val;
		int ret;

		ret = ipu6_buttress_send_tsc_request(isp);
		if (ret != -ETIMEDOUT)
			return ret;

		val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
		val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
		val = val & ~BUTTRESS_TSW_CTL_SOFT_RESET;
		writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
	}

	dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_start_tsc_sync, INTEL_IPU6);

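/*
 * Read the 64-bit buttress TSC. The high word is sampled before and
 * after the low word; if the two samples differ, the MSB of the low
 * word decides which high word belongs to the sampled low word, i.e.
 * whether the counter wrapped between the reads.
 */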
void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val)
{
	u32 tsc_hi_1, tsc_hi_2, tsc_lo;
	unsigned long flags;

	local_irq_save(flags);
	tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
	tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
	tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
	if (tsc_hi_1 == tsc_hi_2) {
		*val = (u64)tsc_hi_1 << 32 | tsc_lo;
	} else {
		/* Check if TSC has rolled over */
		if (tsc_lo & BIT(31))
			*val = (u64)tsc_hi_1 << 32 | tsc_lo;
		else
			*val = (u64)tsc_hi_2 << 32 | tsc_lo;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_read, INTEL_IPU6);

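/*
 * Note: ref_clk is assumed to be stored in units of 100 kHz (192 ==
 * 19.2 MHz, matching the ref clock table in ipu6_buttress_init), so
 * ns = ticks * 10000 / ref_clk as derived in the comment below.
 */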
u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp)
{
	u64 ns = ticks * 10000;

	/*
	 * converting TSC tick count to ns is calculated by:
	 * Example (TSC clock frequency is 19.2MHz):
	 * ns = ticks * 1000 000 000 / 19.2Mhz
	 *    = ticks * 1000 000 000 / 19200000Hz
	 *    = ticks * 10000 / 192 ns
	 */
	return div_u64(ns, isp->buttress.ref_clk);
}
EXPORT_SYMBOL_NS_GPL(ipu6_buttress_tsc_ticks_to_ns, INTEL_IPU6);

void ipu6_buttress_restore(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;

	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
	writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
}

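/*
 * One-time buttress setup: initialize locks and the CSE IPC register
 * map, detect secure mode, cache the watchdog register, unmask the
 * buttress interrupts, decode the reference clock indication and run
 * the IPC reset protocol (retried a few times in case the CSE comes
 * up late).
 */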
int ipu6_buttress_init(struct ipu6_device *isp)
{
	int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
	struct ipu6_buttress *b = &isp->buttress;
	u32 val;

	mutex_init(&b->power_mutex);
	mutex_init(&b->auth_mutex);
	mutex_init(&b->cons_mutex);
	mutex_init(&b->ipc_mutex);
	init_completion(&b->cse.send_complete);
	init_completion(&b->cse.recv_complete);

	b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
	b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
	b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
	b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
	b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
	b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
	b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
	b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;

	INIT_LIST_HEAD(&b->constraints);

	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
	dev_info(&isp->pdev->dev, "IPU6 in %s mode touch 0x%x mask 0x%x\n",
		 isp->secure_mode ? "secure" : "non-secure",
		 readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH),
		 readl(isp->base + BUTTRESS_REG_CAMERA_MASK));

	b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);

	/* get ref_clk frequency by reading the indication in btrs control */
	val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
	val = FIELD_GET(BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND, val);

	switch (val) {
	case 0x0:
		b->ref_clk = 240;
		break;
	case 0x1:
		b->ref_clk = 192;
		break;
	case 0x2:
		b->ref_clk = 384;
		break;
	default:
		dev_warn(&isp->pdev->dev,
			 "Unsupported ref clock, use 19.2Mhz by default.\n");
		b->ref_clk = 192;
		break;
	}

	/* Retry a couple of times in case CSE initialization is delayed */
	do {
		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
		if (ret) {
			dev_warn(&isp->pdev->dev,
				 "IPC reset protocol failed, retrying\n");
		} else {
			dev_dbg(&isp->pdev->dev, "IPC reset done\n");
			return 0;
		}
	} while (ipc_reset_retry--);

	dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");

	mutex_destroy(&b->power_mutex);
	mutex_destroy(&b->auth_mutex);
	mutex_destroy(&b->cons_mutex);
	mutex_destroy(&b->ipc_mutex);

	return ret;
}

void ipu6_buttress_exit(struct ipu6_device *isp)
{
	struct ipu6_buttress *b = &isp->buttress;

	writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);

	mutex_destroy(&b->power_mutex);
	mutex_destroy(&b->auth_mutex);
	mutex_destroy(&b->cons_mutex);
	mutex_destroy(&b->ipc_mutex);
}