/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */
static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };
#define API_FAST	(1 << 2)	/* Short timeout */
#define API_SLOW	(1 << 3)	/* Additional 300ms timeout */
struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
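
/*
 * Each api_info[] entry below names the processing unit (APU or CPU) a
 * firmware command is destined for, plus an optional API_FAST/API_SLOW
 * timeout hint.  find_api_info() looks a command up in this table so that
 * cx18_api_call() can select the matching mailbox, interrupt and timeout.
 */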
static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,		0),
	API_ENTRY(CPU, CX18_EPU_DEBUG,				0),
	API_ENTRY(CPU, CX18_CREATE_TASK,			0),
	API_ENTRY(CPU, CX18_DESTROY_TASK,			0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START,			API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP,			API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE,			0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME,			0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION,		0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE,	0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING,		0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS,		0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS,		0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM,		API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO,		0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION,	0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM,		0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER,	0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM,			0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK,			0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL,			API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL,			API_SLOW),
	API_ENTRY(APU, CX18_APU_START,				0),
	API_ENTRY(APU, CX18_APU_STOP,				0),
	API_ENTRY(APU, CX18_APU_RESETAI,			0),
	API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32,			0),
	API_ENTRY(0, 0,						0),
};
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}
/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
	char *p;
	int i;

	for (i = 0, p = buf; i < n; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", data[i]);
	}
	return buf;
}
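
/*
 * u32arr2hex() emits 11 characters (" %#010x") per argument, so the caller
 * must supply a buffer of at least n*11+1 bytes.  A usage sketch, modelled
 * on dump_mb() and cx18_api_call() below:
 *
 *	char argstr[MAX_MB_ARGUMENTS*11+1];
 *	CX18_DEBUG_API("args%s\n",
 *		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
 */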
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
		       name, mb->request, mb->ack, mb->cmd, mb->error,
		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}
/*
 * Functions that run in a work_queue work handling context
 */

static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			dvb_dmx_swfilter(&s->dvb->demux,
					 buf->buf, buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			continue;
		dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
	}
}
static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
				      struct cx18_mdl *mdl)
{
	struct cx18_videobuf_buffer *vb_buf;
	struct cx18_buffer *buf;
	u8 *p;
	u32 offset = 0;
	int dispatch = 0;

	if (mdl->bytesused == 0)
		return;

	/* Acquire a videobuf buffer, clone to it, and release it */
	spin_lock(&s->vb_lock);
	if (list_empty(&s->vb_capture))
		goto out;

	vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
				  vb.queue);

	p = videobuf_to_vmalloc(&vb_buf->vb);
	if (!p)
		goto out;

	offset = vb_buf->bytes_used;
	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;

		if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
			memcpy(p + offset, buf->buf, buf->bytesused);
			offset += buf->bytesused;
			vb_buf->bytes_used += buf->bytesused;
		}
	}

	/* If we've filled the buffer as per the callers res then dispatch it */
	if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
		dispatch = 1;
		vb_buf->bytes_used = 0;
	}

	if (dispatch) {
		v4l2_get_timestamp(&vb_buf->vb.ts);
		list_del(&vb_buf->vb.queue);
		vb_buf->vb.state = VIDEOBUF_DONE;
		wake_up(&vb_buf->vb.done);
	}

	mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);

out:
	spin_unlock(&s->vb_lock);
}
static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
				  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			cx->pcm_announce_callback(cx->alsa, buf->buf,
						  buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			continue;
		cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
	}
}
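
/*
 * epu_dma_done() below processes the MDL acknowledgments of a
 * CX18_EPU_DMA_DONE mailbox from the work handler context: each completed
 * MDL is routed by stream type (TS -> the DVB demux, PCM -> the cx18-alsa
 * announce callback, YUV -> videobuf, everything else -> the stream's
 * q_full queue for user space), and MDLs consumed in-driver go back on
 * q_free so cx18_stream_load_fw_queue() can hand them to the firmware again.
 */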
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_mdl *mdl;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
			  handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent mailbox): make sure the MDL id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old MDL, but pick up the new MDL
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it has a new one to point to).
		 *
		 * cx18_queue_get_mdl() will detect the lost MDLs
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_base_idx &&
		      id < (s->mdl_base_idx + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
				  mb->request);
			break;
		}
		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
		if (mdl == NULL) {
			CX18_WARN("Could not find MDL %d for stream %s\n",
				  id, s->name);
			continue;
		}

		CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
				  s->name, mdl->bytesused);

		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			cx18_mdl_send_to_dvb(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
			/* Pass the data to cx18-alsa */
			if (cx->pcm_announce_callback != NULL) {
				cx18_mdl_send_to_alsa(cx, s, mdl);
				cx18_enqueue(s, mdl, &s->q_free);
			} else {
				cx18_enqueue(s, mdl, &s->q_full);
			}
		} else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
			cx18_mdl_send_to_videobuf(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			cx18_enqueue(s, mdl, &s->q_full);
			if (s->type == CX18_ENC_STREAM_TYPE_IDX)
				cx18_stream_rotate_idx_mdls(cx);
		}
	}

	/* Put as many MDLs as possible back into fw use */
	cx18_stream_load_fw_queue(s);

	wake_up(&cx->dma_waitq);
}
static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
	switch (order->rpu) {
	case CPU:
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}
static void free_in_work_order(struct cx18 *cx,
			       struct cx18_in_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_in_work_handler(struct work_struct *work)
{
	struct cx18_in_work_order *order =
			container_of(work, struct cx18_in_work_order, work);
	struct cx18 *cx = order->cx;

	epu_cmd(cx, order);
	free_in_work_order(cx, order);
}
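
/*
 * Work order life cycle: alloc_in_work_order_irq() marks an order "pending"
 * in IRQ context, cx18_api_epu_cmd_irq() fills it in and queues it on
 * cx->in_work_queue, and cx18_in_work_handler() above runs epu_cmd() on it
 * before clearing "pending" again via free_in_work_order().
 */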
/*
 * Functions that run in an interrupt handling context
 */

static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
}
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
			cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}
static int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}
static int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}
static struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_in_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
			order = &cx->in_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_in_work_order *order;
	int submit;
	int i;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_in_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	for (i = 0; i < 4; i++)
		(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);

	/* mb->request and mb->ack.  N.B. we want to read mb->ack last */
	for (i = 0; i < 2; i++)
		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		if (cx18_debug & CX18_DBGFLG_WARN)
			dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0)
		queue_work(cx->in_work_queue, &order->work);
}
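
/*
 * Note the two-stage handling of incoming mailboxes: in IRQ context
 * cx18_api_epu_cmd_irq() only snapshots the mailbox into a work order and
 * (via epu_cmd_irq()) acks it, while the heavier per-command processing is
 * deferred to cx18_in_work_handler() running from cx->in_work_queue.
 */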
/*
 * Functions called from a non-interrupt, non work_queue context
 */

static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	unsigned long int t0, timeout, ret;
	int i;
	char argstr[MAX_MB_ARGUMENTS*11+1];
	DEFINE_WAIT(w);

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
		if (cmd == CX18_CPU_DE_SET_MDL) {
			if (cx18_debug & CX18_DBGFLG_HIGHVOL)
				CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
						  info->name, cmd,
						  u32arr2hex(data, args, argstr));
		} else
			CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
				       info->name, cmd,
				       u32arr2hex(data, args, argstr));
	}

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
			 info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);

	/* So we don't miss the wakeup, prepare to wait before notifying fw */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	t0 = jiffies;
	ack = cx18_readl(cx, &mb->ack);
	if (ack != req) {
		schedule_timeout(timeout);
		ret = jiffies - t0;
		ack = cx18_readl(cx, &mb->ack);
	} else {
		ret = jiffies - t0;
	}

	finish_wait(waitq, &w);

	if (req != ack) {
		mutex_unlock(mb_lock);
		if (ret >= timeout) {
			/* Timed out */
			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
					info->name, jiffies_to_msecs(ret));
		} else {
			CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU. only waited %d msecs on req %u but awakened with unmatched ack %u\n",
					info->name,
					jiffies_to_msecs(ret),
					req, ack);
		}
		return -EINVAL;
	}

	if (ret >= timeout) {
		/* Timed out */
		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
				info->name, jiffies_to_msecs(ret));
	} else
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}
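
/*
 * cx18_set_filter_param() below pushes the cached DNR settings to the
 * firmware.  Judging from the three CX18_CPU_SET_FILTER_PARAM calls it
 * makes, bit 0 of cx->filter_mode appears to gate the spatial filter,
 * bit 1 the temporal filter, and the remaining bits (filter_mode >> 2)
 * select the median filter type; this reading is inferred from the code,
 * not from firmware documentation.
 */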
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_stream *s = priv;
	struct cx18 *cx = s->cx;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}
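
/*
 * cx18_vapi() is just a varargs front end: the arguments are gathered into
 * a u32 array and handed to cx18_api()/cx18_api_call().  A typical call,
 * mirroring cx18_api_func() above (illustrative only):
 *
 *	cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle, data[0]);
 */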