/*
 * Samsung S5P Multi Format Codec v 5.1
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Kamil Debski, <k.debski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/videobuf2-core.h>
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_dec.h"
#include "s5p_mfc_enc.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
#include "s5p_mfc_shm.h"
#define S5P_MFC_NAME		"s5p-mfc"
#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"
int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
/* Helper functions for interrupt processing */

/* Remove from hw execution round robin */
static void clear_work_bit(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	spin_lock(&dev->condlock);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
}
/* Wake up context wait_queue */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
			unsigned int err)
{
	ctx->int_cond = 1;
	ctx->int_type = reason;
	ctx->int_err = err;
	wake_up(&ctx->queue);
}
/* Wake up device wait_queue */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
			unsigned int err)
{
	dev->int_cond = 1;
	dev->int_type = reason;
	dev->int_err = err;
	wake_up(&dev->queue);
}
void s5p_mfc_watchdog(unsigned long arg)
{
	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

	if (test_bit(0, &dev->hw_lock))
		atomic_inc(&dev->watchdog_cnt);
	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
		/* This means that hw is busy and no interrupts were
		 * generated by hw for the Nth time of running this
		 * watchdog timer. This usually means a serious hw
		 * error. Now it is time to kill all instances and
		 * reset the hardware. */
		mfc_err("Time out during waiting for HW\n");
		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
	}
	dev->watchdog_timer.expires = jiffies +
				msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
	add_timer(&dev->watchdog_timer);
}
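
/* Watchdog worker: runs when the hardware has stopped generating
 * interrupts. It marks every context as errored, returns their queued
 * buffers and, if any instance is still open, reloads the firmware. */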
static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);
	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		ctx->state = MFCINST_ERROR;
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
	}
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory then no firmware should be present. */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_reload_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
		} else {
			ret = s5p_mfc_init_hw(dev);
			if (ret)
				mfc_err("Failed to reinit FW\n");
		}
	}
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}
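
/* Tell whether an open file refers to the decoder or the encoder node,
 * based on the index of the video device it was opened on. */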
static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	if (!vdev) {
		mfc_err("failed to get video_device");
		return MFCNODE_INVALID;
	}
	if (vdev->index == 0)
		return MFCNODE_DECODER;
	else if (vdev->index == 1)
		return MFCNODE_ENCODER;
	return MFCNODE_INVALID;
}
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}
static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;

	ctx->state = MFCINST_FINISHED;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
			  dst_buf->b->v4l2_buf.index);
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
		if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
		    s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}
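
/* Copy the timestamp, timecode and key/P/B-frame flags from the source
 * buffer being decoded to the destination buffer whose luma address
 * matches the one reported by the hardware. */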
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf, *src_buf;
	size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
	unsigned int frame_type = s5p_mfc_get_frame_type();

	/* Copy timestamp / timecode from decoded src to dst and set
	 * appropriate flags. */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
			memcpy(&dst_buf->b->v4l2_buf.timecode,
			       &src_buf->b->v4l2_buf.timecode,
			       sizeof(struct v4l2_timecode));
			memcpy(&dst_buf->b->v4l2_buf.timestamp,
			       &src_buf->b->v4l2_buf.timestamp,
			       sizeof(struct timeval));
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			break;
		}
	}
}
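
/* A picture is ready for display: find the destination buffer that holds
 * it, set its payload and field order, and return it to userspace. */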
static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf;
	size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
	unsigned int frame_type = s5p_mfc_get_frame_type();
	unsigned int index;

	/* If frame is same as previous then skip and do not dequeue */
	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
		if (!ctx->after_packed_pb)
			ctx->sequence++;
		ctx->after_packed_pb = 0;
		return;
	}
	ctx->sequence++;
	/* The MFC returns the address of the buffer, now we have to
	 * check which videobuf it corresponds to */
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		/* Check if this is the buffer we're looking for */
		if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
			list_del(&dst_buf->list);
			ctx->dst_queue_cnt--;
			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
			if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
			    s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
			else
				dst_buf->b->v4l2_buf.field =
							V4L2_FIELD_INTERLACED;
			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
			clear_bit(dst_buf->b->v4l2_buf.index,
						&ctx->dec_dst_flag);
			vb2_buffer_done(dst_buf->b,
				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
			index = dst_buf->b->v4l2_buf.index;
			break;
		}
	}
}
/* Handle frame decoding interrupt */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dst_frame_status;
	struct s5p_mfc_buf *src_buf;
	unsigned long flags;
	unsigned int res_change;
	unsigned int index;

	dst_frame_status = s5p_mfc_get_dspl_status()
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	res_change = s5p_mfc_get_dspl_status()
				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
	if (ctx->state == MFCINST_RES_CHANGE_INIT)
		ctx->state = MFCINST_RES_CHANGE_FLUSH;
	if (res_change) {
		ctx->state = MFCINST_RES_CHANGE_INIT;
		s5p_mfc_clear_int_flags(dev);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_try_run(dev);
		return;
	}
	if (ctx->dpb_flush_flag)
		ctx->dpb_flush_flag = 0;
	spin_lock_irqsave(&dev->irqlock, flags);
	/* All frames remaining in the buffer have been extracted */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
			s5p_mfc_handle_frame_all_extracted(ctx);
			ctx->state = MFCINST_RES_CHANGE_END;
			goto leave_handle_frame;
		} else {
			s5p_mfc_handle_frame_all_extracted(ctx);
		}
	}
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
		s5p_mfc_handle_frame_copy_time(ctx);
	/* A frame has been decoded and is in the buffer */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
		s5p_mfc_handle_frame_new(ctx, err);
	} else {
		mfc_debug(2, "No frame decode\n");
	}
	/* Mark source buffer as complete */
	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
	    && !list_empty(&ctx->src_queue)) {
		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
								list);
		ctx->consumed_stream += s5p_mfc_get_consumed_stream();
		if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
		    s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
		    && ctx->consumed_stream + STUFF_BYTE <
		    src_buf->b->v4l2_planes[0].bytesused) {
			/* Run MFC again on the same buffer */
			mfc_debug(2, "Running again the same buffer\n");
			ctx->after_packed_pb = 1;
		} else {
			index = src_buf->b->v4l2_buf.index;
			mfc_debug(2, "MFC needs next buffer\n");
			ctx->consumed_stream = 0;
			list_del(&src_buf->list);
			ctx->src_queue_cnt--;
			if (s5p_mfc_err_dec(err) > 0)
				vb2_buffer_done(src_buf->b,
						VB2_BUF_STATE_ERROR);
			else
				vb2_buffer_done(src_buf->b,
						VB2_BUF_STATE_DONE);
		}
	}
leave_handle_frame:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
				    || ctx->dst_queue_cnt < ctx->dpb_count)
		clear_work_bit(ctx);
	s5p_mfc_clear_int_flags(dev);
	wake_up_ctx(ctx, reason, err);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_try_run(dev);
}
/* Error handling for interrupt */
static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;
	unsigned long flags;

	/* If no context is available then all necessary
	 * processing has been done. */
	if (!ctx)
		return;
	dev = ctx->dev;
	mfc_err("Interrupt Error: %08x\n", err);
	s5p_mfc_clear_int_flags(dev);
	wake_up_dev(dev, reason, err);
	/* Error recovery is dependent on the state of context */
	switch (ctx->state) {
	case MFCINST_INIT:
		/* This error had to happen while acquiring instance */
	case MFCINST_GOT_INST:
		/* This error had to happen while parsing the header */
	case MFCINST_HEAD_PARSED:
		/* This error had to happen while setting dst buffers */
	case MFCINST_RETURN_INST:
		/* This error had to happen while releasing instance */
		clear_work_bit(ctx);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		ctx->state = MFCINST_ERROR;
		break;
	case MFCINST_FINISHING:
	case MFCINST_FINISHED:
	case MFCINST_RUNNING:
		/* It is highly probable that an error occurred
		 * while decoding a frame */
		clear_work_bit(ctx);
		ctx->state = MFCINST_ERROR;
		/* Mark all dst buffers as having an error */
		spin_lock_irqsave(&dev->irqlock, flags);
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		/* Mark all src buffers as having an error */
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		spin_unlock_irqrestore(&dev->irqlock, flags);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		break;
	default:
		mfc_err("Encountered an error interrupt which had not been handled\n");
		break;
	}
}
/* Header parsing interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
				    unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;
	unsigned int guard_width, guard_height;

	if (!ctx)
		return;
	dev = ctx->dev;
	if (ctx->c_ops->post_seq_start) {
		if (ctx->c_ops->post_seq_start(ctx))
			mfc_err("post_seq_start() failed\n");
	} else {
		ctx->img_width = s5p_mfc_get_img_width();
		ctx->img_height = s5p_mfc_get_img_height();
		ctx->buf_width = ALIGN(ctx->img_width,
						S5P_FIMV_NV12MT_HALIGN);
		ctx->buf_height = ALIGN(ctx->img_height,
						S5P_FIMV_NV12MT_VALIGN);
		mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
			"buffer dimensions: %dx%d\n", ctx->img_width,
			ctx->img_height, ctx->buf_width, ctx->buf_height);
		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
			ctx->luma_size = ALIGN(ctx->buf_width *
				ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
			ctx->chroma_size = ALIGN(ctx->buf_width *
					ALIGN((ctx->img_height >> 1),
					      S5P_FIMV_NV12MT_VALIGN),
					S5P_FIMV_DEC_BUF_ALIGN);
			ctx->mv_size = ALIGN(ctx->buf_width *
					ALIGN((ctx->buf_height >> 2),
					      S5P_FIMV_NV12MT_VALIGN),
					S5P_FIMV_DEC_BUF_ALIGN);
		} else {
			guard_width = ALIGN(ctx->img_width + 24,
						S5P_FIMV_NV12MT_HALIGN);
			guard_height = ALIGN(ctx->img_height + 16,
						S5P_FIMV_NV12MT_VALIGN);
			ctx->luma_size = ALIGN(guard_width * guard_height,
						S5P_FIMV_DEC_BUF_ALIGN);
			guard_width = ALIGN(ctx->img_width + 16,
						S5P_FIMV_NV12MT_HALIGN);
			guard_height = ALIGN((ctx->img_height >> 1) + 4,
						S5P_FIMV_NV12MT_VALIGN);
			ctx->chroma_size = ALIGN(guard_width * guard_height,
						S5P_FIMV_DEC_BUF_ALIGN);
		}
		ctx->dpb_count = s5p_mfc_get_dpb_count();
		if (ctx->img_width == 0 || ctx->img_height == 0)
			ctx->state = MFCINST_ERROR;
		else
			ctx->state = MFCINST_HEAD_PARSED;
	}
	s5p_mfc_clear_int_flags(dev);
	clear_work_bit(ctx);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_try_run(dev);
	wake_up_ctx(ctx, reason, err);
}
/* Buffer initialization interrupt handling */
static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
					unsigned int reason, unsigned int err)
{
	struct s5p_mfc_buf *src_buf;
	struct s5p_mfc_dev *dev;
	unsigned long flags;

	if (!ctx)
		return;
	dev = ctx->dev;
	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	spin_lock(&dev->condlock);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
	if (err == 0) {
		ctx->state = MFCINST_RUNNING;
		if (!ctx->dpb_flush_flag) {
			spin_lock_irqsave(&dev->irqlock, flags);
			if (!list_empty(&ctx->src_queue)) {
				src_buf = list_entry(ctx->src_queue.next,
						     struct s5p_mfc_buf, list);
				list_del(&src_buf->list);
				ctx->src_queue_cnt--;
				vb2_buffer_done(src_buf->b,
						VB2_BUF_STATE_DONE);
			}
			spin_unlock_irqrestore(&dev->irqlock, flags);
		} else {
			ctx->dpb_flush_flag = 0;
		}
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		wake_up(&ctx->queue);
		s5p_mfc_try_run(dev);
	} else {
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		wake_up(&ctx->queue);
	}
}
/* Interrupt processing */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
	struct s5p_mfc_dev *dev = priv;
	struct s5p_mfc_ctx *ctx;
	unsigned int reason;
	unsigned int err;

	/* Reset the timeout watchdog */
	atomic_set(&dev->watchdog_cnt, 0);
	ctx = dev->ctx[dev->curr_ctx];
	/* Get the reason of interrupt and the error code */
	reason = s5p_mfc_get_int_reason();
	err = s5p_mfc_get_int_err();
	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
	switch (reason) {
	case S5P_FIMV_R2H_CMD_ERR_RET:
		/* An error has occurred */
		if (ctx->state == MFCINST_RUNNING &&
		    s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
			s5p_mfc_handle_frame(ctx, reason, err);
		else
			s5p_mfc_handle_error(ctx, reason, err);
		clear_bit(0, &dev->enter_suspend);
		break;
	case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
	case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
		if (ctx->c_ops->post_frame_start) {
			if (ctx->c_ops->post_frame_start(ctx))
				mfc_err("post_frame_start() failed\n");
			s5p_mfc_clear_int_flags(dev);
			wake_up_ctx(ctx, reason, err);
			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
				BUG();
			s5p_mfc_try_run(dev);
		} else {
			s5p_mfc_handle_frame(ctx, reason, err);
		}
		break;
	case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
		s5p_mfc_handle_seq_done(ctx, reason, err);
		break;
	case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
		ctx->inst_no = s5p_mfc_get_inst_no();
		ctx->state = MFCINST_GOT_INST;
		clear_work_bit(ctx);
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;
	case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
		clear_work_bit(ctx);
		ctx->state = MFCINST_FREE;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;
	case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
	case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
	case S5P_FIMV_R2H_CMD_SLEEP_RET:
	case S5P_FIMV_R2H_CMD_WAKEUP_RET:
		s5p_mfc_clear_int_flags(dev);
		wake_up_dev(dev, reason, err);
		clear_bit(0, &dev->hw_lock);
		clear_bit(0, &dev->enter_suspend);
		break;
	case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
		s5p_mfc_handle_init_buffers(ctx, reason, err);
		break;
	default:
		mfc_debug(2, "Unknown int reason\n");
		s5p_mfc_clear_int_flags(dev);
	}
	return IRQ_HANDLED;

irq_cleanup_hw:
	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		mfc_err("Failed to unlock hw\n");
	s5p_mfc_try_run(dev);
	mfc_debug(2, "Exit via irq_cleanup_hw\n");
	return IRQ_HANDLED;
}
/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = NULL;
	struct vb2_queue *q;
	unsigned long flags;
	int ret = 0;

	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
	/* Allocate memory for context */
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		mfc_err("Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->src_queue);
	INIT_LIST_HEAD(&ctx->dst_queue);
	ctx->src_queue_cnt = 0;
	ctx->dst_queue_cnt = 0;
	/* Get context number */
	ctx->num = 0;
	while (dev->ctx[ctx->num]) {
		ctx->num++;
		if (ctx->num >= MFC_NUM_CONTEXTS) {
			mfc_err("Too many open contexts\n");
			ret = -EBUSY;
			goto err_no_ctx;
		}
	}
	/* Mark context as idle */
	spin_lock_irqsave(&dev->condlock, flags);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
	dev->ctx[ctx->num] = ctx;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		ctx->type = MFCINST_DECODER;
		ctx->c_ops = get_dec_codec_ops();
		/* Setup ctrl handler */
		ret = s5p_mfc_dec_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		ctx->type = MFCINST_ENCODER;
		ctx->c_ops = get_enc_codec_ops();
		/* only for encoder */
		INIT_LIST_HEAD(&ctx->ref_queue);
		ctx->ref_queue_cnt = 0;
		/* Setup ctrl handler */
		ret = s5p_mfc_enc_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	/* Load firmware if this is the first instance */
	if (dev->num_inst == 1) {
		dev->watchdog_timer.expires = jiffies +
				msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
		add_timer(&dev->watchdog_timer);
		ret = s5p_mfc_power_on();
		if (ret < 0) {
			mfc_err("power on failed\n");
			goto err_pwr_enable;
		}
		ret = s5p_mfc_alloc_and_load_firmware(dev);
		if (ret)
			goto err_alloc_fw;
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			goto err_init_hw;
	}
	/* Init videobuf2 queue for CAPTURE */
	q = &ctx->vq_dst;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->drv_priv = &ctx->fh;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
		goto err_queue_init;
	}
	/* Init videobuf2 queue for OUTPUT */
	q = &ctx->vq_src;
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = &ctx->fh;
	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(output)\n");
		goto err_queue_init;
	}
	init_waitqueue_head(&ctx->queue);
	return ret;

	/* Deinit when a failure occurred */
err_queue_init:
err_init_hw:
	s5p_mfc_release_firmware(dev);
err_alloc_fw:
	dev->ctx[ctx->num] = 0;
	del_timer_sync(&dev->watchdog_timer);
err_pwr_enable:
	if (dev->num_inst == 1) {
		if (s5p_mfc_power_off() < 0)
			mfc_err("power off failed\n");
		s5p_mfc_release_firmware(dev);
	}
err_ctrls_setup:
	s5p_mfc_dec_ctrls_delete(ctx);
err_no_ctx:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
err_alloc:
	dev->num_inst--;
	return ret;
}
/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	spin_lock_irqsave(&dev->condlock, flags);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
	/* If instance was initialised then
	 * return instance and free resources */
	if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
		mfc_debug(2, "Has to free instance\n");
		ctx->state = MFCINST_RETURN_INST;
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_try_run(dev);
		/* Wait until instance is returned or timeout occurred */
		if (s5p_mfc_wait_for_done_ctx
		    (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0))
			mfc_err("Err returning instance\n");
		mfc_debug(2, "After free instance\n");
		s5p_mfc_release_codec_buffers(ctx);
		s5p_mfc_release_instance_buffer(ctx);
		if (ctx->type == MFCINST_DECODER)
			s5p_mfc_release_dec_desc_buffer(ctx);
		ctx->inst_no = MFC_NO_INSTANCE_SET;
	}
	/* hardware locking scheme */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance - release firmware\n");
		/* reset <-> F/W release */
		s5p_mfc_release_firmware(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	dev->ctx[ctx->num] = 0;
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return 0;
}
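
/* Poll both queues: POLLOUT/POLLWRNORM when a source (OUTPUT) buffer is
 * done, POLLIN/POLLRDNORM when a destination (CAPTURE) buffer is done. */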
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	return rc;
}
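
/* mmap a buffer: offsets below DST_QUEUE_OFF_BASE map the source (OUTPUT)
 * queue, higher offsets map the destination (CAPTURE) queue. */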
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;

	if (offset < DST_QUEUE_OFF_BASE) {
		mfc_debug(2, "mmapping source\n");
		ret = vb2_mmap(&ctx->vq_src, vma);
	} else {		/* capture */
		mfc_debug(2, "mmapping destination\n");
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
		ret = vb2_mmap(&ctx->vq_dst, vma);
	}
	return ret;
}
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};
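
/* Helper for device_find_child(): match a child device by its name,
 * used to look up the "s5p-mfc-l" and "s5p-mfc-r" memory devices. */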
static int match_child(struct device *dev, void *data)
{
	if (!dev_name(dev))
		return 0;
	return !strcmp(dev_name(dev), (char *)data);
}
/* MFC probe function */
static int __devinit s5p_mfc_probe(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev;
	struct video_device *vfd;
	struct resource *res;
	int ret;

	pr_debug("%s++\n", __func__);
	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
		return -ENOMEM;
	}
	spin_lock_init(&dev->irqlock);
	spin_lock_init(&dev->condlock);
	dev->plat_dev = pdev;
	if (!dev->plat_dev) {
		dev_err(&pdev->dev, "No platform data specified\n");
		ret = -ENODEV;
		goto err_dev;
	}
	ret = s5p_mfc_init_pm(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get mfc clock source\n");
		goto err_dev;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENOENT;
		goto err_res;
	}
	dev->mfc_mem = request_mem_region(res->start, resource_size(res),
					  pdev->name);
	if (dev->mfc_mem == NULL) {
		dev_err(&pdev->dev, "failed to get memory region\n");
		ret = -ENOENT;
		goto err_res;
	}
	dev->regs_base = ioremap(dev->mfc_mem->start,
				 resource_size(dev->mfc_mem));
	if (dev->regs_base == NULL) {
		dev_err(&pdev->dev, "failed to ioremap address region\n");
		ret = -ENOENT;
		goto err_ioremap;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		ret = -ENOENT;
		goto err_get_res;
	}
	dev->irq = res->start;
	ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
			  dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
		goto err_get_res;
	}
	dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
					   match_child);
	if (!dev->mem_dev_l) {
		mfc_err("Mem child (L) device get failed\n");
		ret = -ENODEV;
		goto err_find_child;
	}
	dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
					   match_child);
	if (!dev->mem_dev_r) {
		mfc_err("Mem child (R) device get failed\n");
		ret = -ENODEV;
		goto err_find_child;
	}
	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
	if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
		ret = PTR_ERR(dev->alloc_ctx[0]);
		goto err_mem_init_ctx_0;
	}
	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
	if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
		ret = PTR_ERR(dev->alloc_ctx[1]);
		goto err_mem_init_ctx_1;
	}
	mutex_init(&dev->mfc_mutex);
	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto err_v4l2_dev_reg;
	init_waitqueue_head(&dev->queue);
	/* decoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_dec_alloc;
	}
	vfd->fops = &s5p_mfc_fops;
	vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
	vfd->release = video_device_release;
	vfd->lock = &dev->mfc_mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_dec_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "decoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);
	dev->vfd_dec = vfd;
	/* encoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_enc_alloc;
	}
	vfd->fops = &s5p_mfc_fops;
	vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
	vfd->release = video_device_release;
	vfd->lock = &dev->mfc_mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_enc_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "encoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);
	dev->vfd_enc = vfd;
	platform_set_drvdata(pdev, dev);
	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
	atomic_set(&dev->watchdog_cnt, 0);
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = s5p_mfc_watchdog;
	pr_debug("%s--\n", __func__);
	return 0;

	/* Deinit MFC if probe had failed */
err_enc_reg:
	video_device_release(dev->vfd_enc);
err_enc_alloc:
	video_unregister_device(dev->vfd_dec);
err_dec_reg:
	video_device_release(dev->vfd_dec);
err_dec_alloc:
	v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
err_mem_init_ctx_0:
err_find_child:
	free_irq(dev->irq, dev);
err_get_res:
	iounmap(dev->regs_base);
	dev->regs_base = NULL;
err_ioremap:
	release_resource(dev->mfc_mem);
	kfree(dev->mfc_mem);
err_res:
	s5p_mfc_final_pm(dev);
err_dev:
	kfree(dev);
	pr_debug("%s-- with error\n", __func__);
	return ret;
}
/* Remove the driver */
static int __devexit s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);
	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
	free_irq(dev->irq, dev);
	iounmap(dev->regs_base);
	if (dev->mfc_mem) {
		release_resource(dev->mfc_mem);
		kfree(dev->mfc_mem);
		dev->mfc_mem = NULL;
	}
	s5p_mfc_final_pm(dev);
	kfree(dev);
	return 0;
}
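
/* Power management: system sleep waits for the hardware to go idle and
 * then puts the MFC to sleep; runtime PM only tracks the power state. */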
#ifdef CONFIG_PM_SLEEP

static int s5p_mfc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
	int ret;

	if (m_dev->num_inst == 0)
		return 0;
	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
		mfc_err("Error: going to suspend for a second time\n");
		return -EIO;
	}
	/* Check if we're processing, then wait if necessary. */
	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
		/* Try and lock the HW */
		/* Wait on the interrupt waitqueue */
		ret = wait_event_interruptible_timeout(m_dev->queue,
			m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
			msecs_to_jiffies(MFC_INT_TIMEOUT));
		if (ret == 0) {
			mfc_err("Waiting for hardware to finish timed out\n");
			return -EIO;
		}
	}
	return s5p_mfc_sleep(m_dev);
}
static int s5p_mfc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	if (m_dev->num_inst == 0)
		return 0;
	return s5p_mfc_wakeup(m_dev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int s5p_mfc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	atomic_set(&m_dev->pm.power, 0);
	return 0;
}

static int s5p_mfc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
	int pre_power;

	if (!m_dev->alloc_ctx)
		return 0;
	pre_power = atomic_read(&m_dev->pm.power);
	atomic_set(&m_dev->pm.power, 1);
	return 0;
}
#endif /* CONFIG_PM_RUNTIME */
/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};

static struct platform_driver s5p_mfc_pdrv = {
	.probe	= s5p_mfc_probe,
	.remove	= __devexit_p(s5p_mfc_remove),
	.driver	= {
		.name	= S5P_MFC_NAME,
		.owner	= THIS_MODULE,
		.pm	= &s5p_mfc_pm_ops
	},
};
static char banner[] __initdata =
	"S5P MFC V4L2 Driver, (C) 2011 Samsung Electronics\n";

static int __init s5p_mfc_init(void)
{
	int ret;

	pr_info("%s", banner);
	ret = platform_driver_register(&s5p_mfc_pdrv);
	if (ret)
		pr_err("Platform device registration failed.\n");
	return ret;
}

static void __exit s5p_mfc_exit(void)
{
	platform_driver_unregister(&s5p_mfc_pdrv);
}

module_init(s5p_mfc_init);
module_exit(s5p_mfc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");