// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 */

#include <linux/bitops.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */
static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */
struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }

static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(APU, CX18_APU_START, 0),
	API_ENTRY(APU, CX18_APU_STOP, 0),
	API_ENTRY(APU, CX18_APU_RESETAI, 0),
	API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
	API_ENTRY(0, 0, 0),	/* end-of-table sentinel for find_api_info() */
};
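/*
 * Look up the table entry for a firmware API command, so callers can get at
 * the command's name, destination processing unit and timeout flags.
 */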
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}
/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
	char *p;
	int i;

	for (i = 0, p = buf; i < n; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", data[i]);
	}
	return buf;
}
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
		       name, mb->request, mb->ack, mb->cmd, mb->error,
		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}
/*
 * Functions that run in a work_queue work handling context
 */
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			dvb_dmx_swfilter(&s->dvb->demux,
					 buf->buf, buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			continue;
		dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
	}
}
static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
				      struct cx18_mdl *mdl)
{
	struct cx18_videobuf_buffer *vb_buf;
	struct cx18_buffer *buf;
	u8 *p;
	u32 offset;
	int dispatch = 0;

	if (mdl->bytesused == 0)
		return;

	/* Acquire a videobuf buffer, clone to it, and release it */
	spin_lock(&s->vb_lock);
	if (list_empty(&s->vb_capture))
		goto out;

	vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
				  vb.queue);

	p = videobuf_to_vmalloc(&vb_buf->vb);
	if (!p)
		goto out;

	offset = vb_buf->bytes_used;
	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;

		if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
			memcpy(p + offset, buf->buf, buf->bytesused);
			offset += buf->bytesused;
			vb_buf->bytes_used += buf->bytesused;
		}
	}

	/* If we've filled the buffer as per the callers res then dispatch it */
	if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
		dispatch = 1;
		vb_buf->bytes_used = 0;
	}

	if (dispatch) {
		vb_buf->vb.ts = ktime_get_ns();
		list_del(&vb_buf->vb.queue);
		vb_buf->vb.state = VIDEOBUF_DONE;
		wake_up(&vb_buf->vb.done);
	}

	mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);

out:
	spin_unlock(&s->vb_lock);
}
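/*
 * Hand the payload of each buffer in the MDL to the cx18-alsa module via the
 * PCM announce callback it registered with the core driver.
 */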
static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
				  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			cx->pcm_announce_callback(cx->alsa, buf->buf,
						  buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			continue;
		cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
	}
}
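/*
 * Work handler side of a CX18_EPU_DMA_DONE order: map the task handle back to
 * its stream, claim every MDL the firmware acked, hand the data to the
 * DVB/ALSA/videobuf consumers as appropriate, and recycle the MDLs so the
 * firmware queue can be refilled.
 */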
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_mdl *mdl;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
			  handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent) mailbox: make sure the MDL id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old MDL, but pick up the new MDL
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it has finished filling it in).
		 *
		 * cx18_queue_get_mdl() will detect the lost MDLs
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_base_idx &&
		      id < (s->mdl_base_idx + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
				  mb->request);
			break;
		}
		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
		if (mdl == NULL) {
			CX18_WARN("Could not find MDL %d for stream %s\n",
				  id, s->name);
			continue;
		}

		CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
				  s->name, mdl->bytesused);

		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			cx18_mdl_send_to_dvb(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
			/* Pass the data to cx18-alsa */
			if (cx->pcm_announce_callback != NULL) {
				cx18_mdl_send_to_alsa(cx, s, mdl);
				cx18_enqueue(s, mdl, &s->q_free);
			} else {
				cx18_enqueue(s, mdl, &s->q_full);
			}
		} else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
			cx18_mdl_send_to_videobuf(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			cx18_enqueue(s, mdl, &s->q_full);
			if (s->type == CX18_ENC_STREAM_TYPE_IDX)
				cx18_stream_rotate_idx_mdls(cx);
		}
	}

	/* Put as many MDLs as possible back into fw use */
	cx18_stream_load_fw_queue(s);

	wake_up(&cx->dma_waitq);
}
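/*
 * Log a debug string the firmware passed up in a CX18_EPU_DEBUG order; a
 * string received before the firmware is flagged as loaded is reported as the
 * firmware version.
 */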
static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
	switch (order->rpu) {
	case CPU:
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}
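/*
 * Deferred processing: the IRQ handler queues an order on cx->in_work_queue
 * and cx18_in_work_handler() dispatches it here outside of interrupt context,
 * releasing the order slot when done.
 */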
static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_in_work_handler(struct work_struct *work)
{
	struct cx18_in_work_order *order =
			container_of(work, struct cx18_in_work_order, work);
	struct cx18 *cx = order->cx;

	epu_cmd(cx, order);
	free_in_work_order(cx, order);
}
/*
 * Functions that run in an interrupt handling context
 */
static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
}
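/*
 * IRQ-context half of DMA done handling: snapshot the firmware's mdl_ack
 * array out of encoder memory into the work order, ack the mailbox if it is
 * not stale, and return > 0 so the caller schedules epu_dma_done().
 */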
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
			cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}
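/*
 * IRQ-context half of debug message handling: copy the message text out of
 * encoder memory into the work order and ack the mailbox if it is not stale.
 */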
static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}
static
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}
static
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_in_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
			order = &cx->in_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}
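/*
 * Entry point from the interrupt handler for an incoming CPU/APU to EPU
 * mailbox: snapshot the mailbox into a work order, flag the order if the
 * sender has already self-ack'ed it (stale), do the IRQ-context processing,
 * and queue the order for the work handler when more processing is needed.
 */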
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_in_work_order *order;
	int submit;
	int i;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_in_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	for (i = 0; i < 4; i++)
		(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);

	/* mb->request and mb->ack.  N.B. we want to read mb->ack last */
	for (i = 0; i < 2; i++)
		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		if (cx18_debug & CX18_DBGFLG_WARN)
			dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0)
		queue_work(cx->in_work_queue, &order->work);
}
/*
 * Functions called from a non-interrupt, non work_queue context
 */
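/*
 * Send one command to the APU or CPU firmware via the outgoing EPU mailbox
 * and wait for the acknowledgment: serialize on the mailbox mutex, wait out
 * any previous unacked use, write cmd/args and a new request number, raise
 * the SW1 interrupt, then sleep until the firmware echoes the request number
 * in the ack field and read back the returned args and error code.
 */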
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	unsigned long int t0, timeout, ret;
	int i;
	char argstr[MAX_MB_ARGUMENTS*11+1];
	DEFINE_WAIT(w);

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
		if (cmd == CX18_CPU_DE_SET_MDL) {
			if (cx18_debug & CX18_DBGFLG_HIGHVOL)
				CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
						  info->name, cmd,
						  u32arr2hex(data, args, argstr));
		} else
			CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
				       info->name, cmd,
				       u32arr2hex(data, args, argstr));
	}

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
			 info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);

	/* So we don't miss the wakeup, prepare to wait before notifying fw */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	t0 = jiffies;
	ack = cx18_readl(cx, &mb->ack);
	if (ack != req) {
		schedule_timeout(timeout);
		ret = jiffies - t0;
		ack = cx18_readl(cx, &mb->ack);
	} else {
		ret = jiffies - t0;
	}

	finish_wait(waitq, &w);

	if (req != ack) {
		mutex_unlock(mb_lock);
		if (ret >= timeout) {
			/* Timed out */
			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
					info->name, jiffies_to_msecs(ret));
		} else {
			CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU.  only waited %d msecs on req %u but awakened with unmatched ack %u\n",
					info->name,
					jiffies_to_msecs(ret),
					req, ack);
		}
		return -EINVAL;
	}

	if (ret >= timeout)
		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
				info->name, jiffies_to_msecs(ret));
	else
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}
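/*
 * Program the encoder's noise reduction filters from the cached control
 * values.  The second argument of CX18_CPU_SET_FILTER_PARAM appears to select
 * which parameter is set: 1 with the spatial strength, 0 with the temporal
 * strength, and 2 with the remaining filter mode bits.
 */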
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}
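/*
 * Mailbox function handed to the common cx2341x encoder module: it maps the
 * generic CX2341X_ENC_* commands onto the cx18 firmware's CX18_CPU_* calls
 * for this stream.
 */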
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_stream *s = priv;
	struct cx18 *cx = s->cx;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);

	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}
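/*
 * Variadic convenience wrapper around cx18_api(); unlike cx18_vapi_result()
 * it does not hand the mailbox result arguments back to the caller.
 * Illustrative call, mirroring the CX2341X_ENC_MUTE_AUDIO case above:
 *
 *	cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, s->handle, data[0]);
 */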
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}

	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}