treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / media / platform / coda / coda-bit.c
blob3443396ba5f3c09b4edaae5ae2aacd32b25a5601
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Coda multi-standard codec IP - BIT processor functions
5 * Copyright (C) 2012 Vista Silicon S.L.
6 * Javier Martin, <javier.martin@vista-silicon.com>
7 * Xavier Duret
8 * Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
9 */
11 #include <linux/clk.h>
12 #include <linux/irqreturn.h>
13 #include <linux/kernel.h>
14 #include <linux/log2.h>
15 #include <linux/platform_device.h>
16 #include <linux/reset.h>
17 #include <linux/slab.h>
18 #include <linux/videodev2.h>
20 #include <media/v4l2-common.h>
21 #include <media/v4l2-ctrls.h>
22 #include <media/v4l2-fh.h>
23 #include <media/v4l2-mem2mem.h>
24 #include <media/videobuf2-v4l2.h>
25 #include <media/videobuf2-dma-contig.h>
26 #include <media/videobuf2-vmalloc.h>
28 #include "coda.h"
29 #include "imx-vdoa.h"
30 #define CREATE_TRACE_POINTS
31 #include "trace.h"
33 #define CODA_PARA_BUF_SIZE (10 * 1024)
34 #define CODA7_PS_BUF_SIZE 0x28000
35 #define CODA9_PS_SAVE_SIZE (512 * 1024)
37 #define CODA_DEFAULT_GAMMA 4096
38 #define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
40 static void coda_free_bitstream_buffer(struct coda_ctx *ctx);
42 static inline int coda_is_initialized(struct coda_dev *dev)
44 return coda_read(dev, CODA_REG_BIT_CUR_PC) != 0;
47 static inline unsigned long coda_isbusy(struct coda_dev *dev)
49 return coda_read(dev, CODA_REG_BIT_BUSY);
52 static int coda_wait_timeout(struct coda_dev *dev)
54 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
56 while (coda_isbusy(dev)) {
57 if (time_after(jiffies, timeout))
58 return -ETIMEDOUT;
60 return 0;
63 static void coda_command_async(struct coda_ctx *ctx, int cmd)
65 struct coda_dev *dev = ctx->dev;
67 if (dev->devtype->product == CODA_HX4 ||
68 dev->devtype->product == CODA_7541 ||
69 dev->devtype->product == CODA_960) {
70 /* Restore context related registers to CODA */
71 coda_write(dev, ctx->bit_stream_param,
72 CODA_REG_BIT_BIT_STREAM_PARAM);
73 coda_write(dev, ctx->frm_dis_flg,
74 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
75 coda_write(dev, ctx->frame_mem_ctrl,
76 CODA_REG_BIT_FRAME_MEM_CTRL);
77 coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
80 if (dev->devtype->product == CODA_960) {
81 coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
82 coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
85 coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
87 coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
88 coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
89 coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
91 trace_coda_bit_run(ctx, cmd);
93 coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
96 static int coda_command_sync(struct coda_ctx *ctx, int cmd)
98 struct coda_dev *dev = ctx->dev;
99 int ret;
101 lockdep_assert_held(&dev->coda_mutex);
103 coda_command_async(ctx, cmd);
104 ret = coda_wait_timeout(dev);
105 trace_coda_bit_done(ctx);
107 return ret;
110 int coda_hw_reset(struct coda_ctx *ctx)
112 struct coda_dev *dev = ctx->dev;
113 unsigned long timeout;
114 unsigned int idx;
115 int ret;
117 lockdep_assert_held(&dev->coda_mutex);
119 if (!dev->rstc)
120 return -ENOENT;
122 idx = coda_read(dev, CODA_REG_BIT_RUN_INDEX);
124 if (dev->devtype->product == CODA_960) {
125 timeout = jiffies + msecs_to_jiffies(100);
126 coda_write(dev, 0x11, CODA9_GDI_BUS_CTRL);
127 while (coda_read(dev, CODA9_GDI_BUS_STATUS) != 0x77) {
128 if (time_after(jiffies, timeout))
129 return -ETIME;
130 cpu_relax();
134 ret = reset_control_reset(dev->rstc);
135 if (ret < 0)
136 return ret;
138 if (dev->devtype->product == CODA_960)
139 coda_write(dev, 0x00, CODA9_GDI_BUS_CTRL);
140 coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
141 coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
142 ret = coda_wait_timeout(dev);
143 coda_write(dev, idx, CODA_REG_BIT_RUN_INDEX);
145 return ret;
148 static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
150 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
151 struct coda_dev *dev = ctx->dev;
152 u32 rd_ptr;
154 rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
155 kfifo->out = (kfifo->in & ~kfifo->mask) |
156 (rd_ptr - ctx->bitstream.paddr);
157 if (kfifo->out > kfifo->in)
158 kfifo->out -= kfifo->mask + 1;
161 static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
163 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
164 struct coda_dev *dev = ctx->dev;
165 u32 rd_ptr, wr_ptr;
167 rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
168 coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
169 wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
170 coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
173 static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
175 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
176 struct coda_dev *dev = ctx->dev;
177 u32 wr_ptr;
179 wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
180 coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
183 static int coda_h264_bitstream_pad(struct coda_ctx *ctx, u32 size)
185 unsigned char *buf;
186 u32 n;
188 if (size < 6)
189 size = 6;
191 buf = kmalloc(size, GFP_KERNEL);
192 if (!buf)
193 return -ENOMEM;
195 coda_h264_filler_nal(size, buf);
196 n = kfifo_in(&ctx->bitstream_fifo, buf, size);
197 kfree(buf);
199 return (n < size) ? -ENOSPC : 0;
202 int coda_bitstream_flush(struct coda_ctx *ctx)
204 int ret;
206 if (ctx->inst_type != CODA_INST_DECODER || !ctx->use_bit)
207 return 0;
209 ret = coda_command_sync(ctx, CODA_COMMAND_DEC_BUF_FLUSH);
210 if (ret < 0) {
211 v4l2_err(&ctx->dev->v4l2_dev, "failed to flush bitstream\n");
212 return ret;
215 kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr,
216 ctx->bitstream.size);
217 coda_kfifo_sync_to_device_full(ctx);
219 return 0;
222 static int coda_bitstream_queue(struct coda_ctx *ctx, const u8 *buf, u32 size)
224 u32 n = kfifo_in(&ctx->bitstream_fifo, buf, size);
226 return (n < size) ? -ENOSPC : 0;
229 static u32 coda_buffer_parse_headers(struct coda_ctx *ctx,
230 struct vb2_v4l2_buffer *src_buf,
231 u32 payload)
233 u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
234 u32 size = 0;
236 switch (ctx->codec->src_fourcc) {
237 case V4L2_PIX_FMT_MPEG2:
238 size = coda_mpeg2_parse_headers(ctx, vaddr, payload);
239 break;
240 case V4L2_PIX_FMT_MPEG4:
241 size = coda_mpeg4_parse_headers(ctx, vaddr, payload);
242 break;
243 default:
244 break;
247 return size;
250 static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
251 struct vb2_v4l2_buffer *src_buf)
253 unsigned long payload = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
254 u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
255 int ret;
256 int i;
258 if (coda_get_bitstream_payload(ctx) + payload + 512 >=
259 ctx->bitstream.size)
260 return false;
262 if (!vaddr) {
263 v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
264 return true;
267 if (ctx->qsequence == 0 && payload < 512) {
269 * Add padding after the first buffer, if it is too small to be
270 * fetched by the CODA, by repeating the headers. Without
271 * repeated headers, or the first frame already queued, decoder
272 * sequence initialization fails with error code 0x2000 on i.MX6
273 * or error code 0x1 on i.MX51.
275 u32 header_size = coda_buffer_parse_headers(ctx, src_buf,
276 payload);
278 if (header_size) {
279 coda_dbg(1, ctx, "pad with %u-byte header\n",
280 header_size);
281 for (i = payload; i < 512; i += header_size) {
282 ret = coda_bitstream_queue(ctx, vaddr,
283 header_size);
284 if (ret < 0) {
285 v4l2_err(&ctx->dev->v4l2_dev,
286 "bitstream buffer overflow\n");
287 return false;
289 if (ctx->dev->devtype->product == CODA_960)
290 break;
292 } else {
293 coda_dbg(1, ctx,
294 "could not parse header, sequence initialization might fail\n");
298 /* Add padding before the first buffer, if it is too small */
299 if (ctx->qsequence == 0 && payload < 512 &&
300 ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
301 coda_h264_bitstream_pad(ctx, 512 - payload);
303 ret = coda_bitstream_queue(ctx, vaddr, payload);
304 if (ret < 0) {
305 v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
306 return false;
309 src_buf->sequence = ctx->qsequence++;
311 /* Sync read pointer to device */
312 if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
313 coda_kfifo_sync_to_device_write(ctx);
315 /* Set the stream-end flag after the last buffer is queued */
316 if (src_buf->flags & V4L2_BUF_FLAG_LAST)
317 coda_bit_stream_end_flag(ctx);
318 ctx->hold = false;
320 return true;
323 void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
325 struct vb2_v4l2_buffer *src_buf;
326 struct coda_buffer_meta *meta;
327 u32 start;
329 if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
330 return;
332 while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
334 * Only queue two JPEGs into the bitstream buffer to keep
335 * latency low. We need at least one complete buffer and the
336 * header of another buffer (for prescan) in the bitstream.
338 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
339 ctx->num_metas > 1)
340 break;
342 if (ctx->num_internal_frames &&
343 ctx->num_metas >= ctx->num_internal_frames) {
344 meta = list_first_entry(&ctx->buffer_meta_list,
345 struct coda_buffer_meta, list);
348 * If we managed to fill in at least a full reorder
349 * window of buffers (num_internal_frames is a
350 * conservative estimate for this) and the bitstream
351 * prefetcher has at least 2 256 bytes periods beyond
352 * the first buffer to fetch, we can safely stop queuing
353 * in order to limit the decoder drain latency.
355 if (coda_bitstream_can_fetch_past(ctx, meta->end))
356 break;
359 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
361 /* Drop frames that do not start/end with a SOI/EOI markers */
362 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
363 !coda_jpeg_check_buffer(ctx, &src_buf->vb2_buf)) {
364 v4l2_err(&ctx->dev->v4l2_dev,
365 "dropping invalid JPEG frame %d\n",
366 ctx->qsequence);
367 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
368 if (buffer_list) {
369 struct v4l2_m2m_buffer *m2m_buf;
371 m2m_buf = container_of(src_buf,
372 struct v4l2_m2m_buffer,
373 vb);
374 list_add_tail(&m2m_buf->list, buffer_list);
375 } else {
376 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
378 continue;
381 /* Dump empty buffers */
382 if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
383 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
384 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
385 continue;
388 /* Buffer start position */
389 start = ctx->bitstream_fifo.kfifo.in;
391 if (coda_bitstream_try_queue(ctx, src_buf)) {
393 * Source buffer is queued in the bitstream ringbuffer;
394 * queue the timestamp and mark source buffer as done
396 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
398 meta = kmalloc(sizeof(*meta), GFP_KERNEL);
399 if (meta) {
400 meta->sequence = src_buf->sequence;
401 meta->timecode = src_buf->timecode;
402 meta->timestamp = src_buf->vb2_buf.timestamp;
403 meta->start = start;
404 meta->end = ctx->bitstream_fifo.kfifo.in;
405 meta->last = src_buf->flags & V4L2_BUF_FLAG_LAST;
406 if (meta->last)
407 coda_dbg(1, ctx, "marking last meta");
408 spin_lock(&ctx->buffer_meta_lock);
409 list_add_tail(&meta->list,
410 &ctx->buffer_meta_list);
411 ctx->num_metas++;
412 spin_unlock(&ctx->buffer_meta_lock);
414 trace_coda_bit_queue(ctx, src_buf, meta);
417 if (buffer_list) {
418 struct v4l2_m2m_buffer *m2m_buf;
420 m2m_buf = container_of(src_buf,
421 struct v4l2_m2m_buffer,
422 vb);
423 list_add_tail(&m2m_buf->list, buffer_list);
424 } else {
425 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
427 } else {
428 break;
433 void coda_bit_stream_end_flag(struct coda_ctx *ctx)
435 struct coda_dev *dev = ctx->dev;
437 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
439 /* If this context is currently running, update the hardware flag */
440 if ((dev->devtype->product == CODA_960) &&
441 coda_isbusy(dev) &&
442 (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
443 coda_write(dev, ctx->bit_stream_param,
444 CODA_REG_BIT_BIT_STREAM_PARAM);
448 static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
450 struct coda_dev *dev = ctx->dev;
451 u32 *p = ctx->parabuf.vaddr;
453 if (dev->devtype->product == CODA_DX6)
454 p[index] = value;
455 else
456 p[index ^ 1] = value;
459 static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
460 struct coda_aux_buf *buf, size_t size,
461 const char *name)
463 return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
467 static void coda_free_framebuffers(struct coda_ctx *ctx)
469 int i;
471 for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
472 coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i].buf);
475 static int coda_alloc_framebuffers(struct coda_ctx *ctx,
476 struct coda_q_data *q_data, u32 fourcc)
478 struct coda_dev *dev = ctx->dev;
479 unsigned int ysize, ycbcr_size;
480 int ret;
481 int i;
483 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
484 ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
485 ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 ||
486 ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4)
487 ysize = round_up(q_data->rect.width, 16) *
488 round_up(q_data->rect.height, 16);
489 else
490 ysize = round_up(q_data->rect.width, 8) * q_data->rect.height;
492 if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
493 ycbcr_size = round_up(ysize, 4096) + ysize / 2;
494 else
495 ycbcr_size = ysize + ysize / 2;
497 /* Allocate frame buffers */
498 for (i = 0; i < ctx->num_internal_frames; i++) {
499 size_t size = ycbcr_size;
500 char *name;
502 /* Add space for mvcol buffers */
503 if (dev->devtype->product != CODA_DX6 &&
504 (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
505 (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)))
506 size += ysize / 4;
507 name = kasprintf(GFP_KERNEL, "fb%d", i);
508 if (!name) {
509 coda_free_framebuffers(ctx);
510 return -ENOMEM;
512 ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i].buf,
513 size, name);
514 kfree(name);
515 if (ret < 0) {
516 coda_free_framebuffers(ctx);
517 return ret;
521 /* Register frame buffers in the parameter buffer */
522 for (i = 0; i < ctx->num_internal_frames; i++) {
523 u32 y, cb, cr, mvcol;
525 /* Start addresses of Y, Cb, Cr planes */
526 y = ctx->internal_frames[i].buf.paddr;
527 cb = y + ysize;
528 cr = y + ysize + ysize/4;
529 mvcol = y + ysize + ysize/4 + ysize/4;
530 if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
531 cb = round_up(cb, 4096);
532 mvcol = cb + ysize/2;
533 cr = 0;
534 /* Packed 20-bit MSB of base addresses */
535 /* YYYYYCCC, CCyyyyyc, cccc.... */
536 y = (y & 0xfffff000) | cb >> 20;
537 cb = (cb & 0x000ff000) << 12;
539 coda_parabuf_write(ctx, i * 3 + 0, y);
540 coda_parabuf_write(ctx, i * 3 + 1, cb);
541 coda_parabuf_write(ctx, i * 3 + 2, cr);
543 if (dev->devtype->product == CODA_DX6)
544 continue;
546 /* mvcol buffer for h.264 and mpeg4 */
547 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
548 coda_parabuf_write(ctx, 96 + i, mvcol);
549 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)
550 coda_parabuf_write(ctx, 97, mvcol);
553 return 0;
556 static void coda_free_context_buffers(struct coda_ctx *ctx)
558 struct coda_dev *dev = ctx->dev;
560 coda_free_aux_buf(dev, &ctx->slicebuf);
561 coda_free_aux_buf(dev, &ctx->psbuf);
562 if (dev->devtype->product != CODA_DX6)
563 coda_free_aux_buf(dev, &ctx->workbuf);
564 coda_free_aux_buf(dev, &ctx->parabuf);
567 static int coda_alloc_context_buffers(struct coda_ctx *ctx,
568 struct coda_q_data *q_data)
570 struct coda_dev *dev = ctx->dev;
571 size_t size;
572 int ret;
574 if (!ctx->parabuf.vaddr) {
575 ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
576 CODA_PARA_BUF_SIZE, "parabuf");
577 if (ret < 0)
578 return ret;
581 if (dev->devtype->product == CODA_DX6)
582 return 0;
584 if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
585 /* worst case slice size */
586 size = (DIV_ROUND_UP(q_data->rect.width, 16) *
587 DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
588 ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
589 "slicebuf");
590 if (ret < 0)
591 goto err;
594 if (!ctx->psbuf.vaddr && (dev->devtype->product == CODA_HX4 ||
595 dev->devtype->product == CODA_7541)) {
596 ret = coda_alloc_context_buf(ctx, &ctx->psbuf,
597 CODA7_PS_BUF_SIZE, "psbuf");
598 if (ret < 0)
599 goto err;
602 if (!ctx->workbuf.vaddr) {
603 size = dev->devtype->workbuf_size;
604 if (dev->devtype->product == CODA_960 &&
605 q_data->fourcc == V4L2_PIX_FMT_H264)
606 size += CODA9_PS_SAVE_SIZE;
607 ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size,
608 "workbuf");
609 if (ret < 0)
610 goto err;
613 return 0;
615 err:
616 coda_free_context_buffers(ctx);
617 return ret;
620 static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
621 int header_code, u8 *header, int *size)
623 struct vb2_buffer *vb = &buf->vb2_buf;
624 struct coda_dev *dev = ctx->dev;
625 struct coda_q_data *q_data_src;
626 struct v4l2_rect *r;
627 size_t bufsize;
628 int ret;
629 int i;
631 if (dev->devtype->product == CODA_960)
632 memset(vb2_plane_vaddr(vb, 0), 0, 64);
634 coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
635 CODA_CMD_ENC_HEADER_BB_START);
636 bufsize = vb2_plane_size(vb, 0);
637 if (dev->devtype->product == CODA_960)
638 bufsize /= 1024;
639 coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
640 if (dev->devtype->product == CODA_960 &&
641 ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 &&
642 header_code == CODA_HEADER_H264_SPS) {
643 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
644 r = &q_data_src->rect;
646 if (r->width % 16 || r->height % 16) {
647 u32 crop_right = round_up(r->width, 16) - r->width;
648 u32 crop_bottom = round_up(r->height, 16) - r->height;
650 coda_write(dev, crop_right,
651 CODA9_CMD_ENC_HEADER_FRAME_CROP_H);
652 coda_write(dev, crop_bottom,
653 CODA9_CMD_ENC_HEADER_FRAME_CROP_V);
654 header_code |= CODA9_HEADER_FRAME_CROP;
657 coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
658 ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
659 if (ret < 0) {
660 v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
661 return ret;
664 if (dev->devtype->product == CODA_960) {
665 for (i = 63; i > 0; i--)
666 if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
667 break;
668 *size = i + 1;
669 } else {
670 *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
671 coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
673 memcpy(header, vb2_plane_vaddr(vb, 0), *size);
675 return 0;
678 static u32 coda_slice_mode(struct coda_ctx *ctx)
680 int size, unit;
682 switch (ctx->params.slice_mode) {
683 case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
684 default:
685 return 0;
686 case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB:
687 size = ctx->params.slice_max_mb;
688 unit = 1;
689 break;
690 case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES:
691 size = ctx->params.slice_max_bits;
692 unit = 0;
693 break;
696 return ((size & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET) |
697 ((unit & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET) |
698 ((1 & CODA_SLICING_MODE_MASK) << CODA_SLICING_MODE_OFFSET);
701 static int coda_enc_param_change(struct coda_ctx *ctx)
703 struct coda_dev *dev = ctx->dev;
704 u32 change_enable = 0;
705 u32 success;
706 int ret;
708 if (ctx->params.gop_size_changed) {
709 change_enable |= CODA_PARAM_CHANGE_RC_GOP;
710 coda_write(dev, ctx->params.gop_size,
711 CODA_CMD_ENC_PARAM_RC_GOP);
712 ctx->gopcounter = ctx->params.gop_size - 1;
713 ctx->params.gop_size_changed = false;
715 if (ctx->params.h264_intra_qp_changed) {
716 coda_dbg(1, ctx, "parameter change: intra Qp %u\n",
717 ctx->params.h264_intra_qp);
719 if (ctx->params.bitrate) {
720 change_enable |= CODA_PARAM_CHANGE_RC_INTRA_QP;
721 coda_write(dev, ctx->params.h264_intra_qp,
722 CODA_CMD_ENC_PARAM_RC_INTRA_QP);
724 ctx->params.h264_intra_qp_changed = false;
726 if (ctx->params.bitrate_changed) {
727 coda_dbg(1, ctx, "parameter change: bitrate %u kbit/s\n",
728 ctx->params.bitrate);
729 change_enable |= CODA_PARAM_CHANGE_RC_BITRATE;
730 coda_write(dev, ctx->params.bitrate,
731 CODA_CMD_ENC_PARAM_RC_BITRATE);
732 ctx->params.bitrate_changed = false;
734 if (ctx->params.framerate_changed) {
735 coda_dbg(1, ctx, "parameter change: frame rate %u/%u Hz\n",
736 ctx->params.framerate & 0xffff,
737 (ctx->params.framerate >> 16) + 1);
738 change_enable |= CODA_PARAM_CHANGE_RC_FRAME_RATE;
739 coda_write(dev, ctx->params.framerate,
740 CODA_CMD_ENC_PARAM_RC_FRAME_RATE);
741 ctx->params.framerate_changed = false;
743 if (ctx->params.intra_refresh_changed) {
744 coda_dbg(1, ctx, "parameter change: intra refresh MBs %u\n",
745 ctx->params.intra_refresh);
746 change_enable |= CODA_PARAM_CHANGE_INTRA_MB_NUM;
747 coda_write(dev, ctx->params.intra_refresh,
748 CODA_CMD_ENC_PARAM_INTRA_MB_NUM);
749 ctx->params.intra_refresh_changed = false;
751 if (ctx->params.slice_mode_changed) {
752 change_enable |= CODA_PARAM_CHANGE_SLICE_MODE;
753 coda_write(dev, coda_slice_mode(ctx),
754 CODA_CMD_ENC_PARAM_SLICE_MODE);
755 ctx->params.slice_mode_changed = false;
758 if (!change_enable)
759 return 0;
761 coda_write(dev, change_enable, CODA_CMD_ENC_PARAM_CHANGE_ENABLE);
763 ret = coda_command_sync(ctx, CODA_COMMAND_RC_CHANGE_PARAMETER);
764 if (ret < 0)
765 return ret;
767 success = coda_read(dev, CODA_RET_ENC_PARAM_CHANGE_SUCCESS);
768 if (success != 1)
769 coda_dbg(1, ctx, "parameter change failed: %u\n", success);
771 return 0;
774 static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
776 phys_addr_t ret;
778 size = round_up(size, 1024);
779 if (size > iram->remaining)
780 return 0;
781 iram->remaining -= size;
783 ret = iram->next_paddr;
784 iram->next_paddr += size;
786 return ret;
789 static void coda_setup_iram(struct coda_ctx *ctx)
791 struct coda_iram_info *iram_info = &ctx->iram_info;
792 struct coda_dev *dev = ctx->dev;
793 int w64, w128;
794 int mb_width;
795 int dbk_bits;
796 int bit_bits;
797 int ip_bits;
798 int me_bits;
800 memset(iram_info, 0, sizeof(*iram_info));
801 iram_info->next_paddr = dev->iram.paddr;
802 iram_info->remaining = dev->iram.size;
804 if (!dev->iram.vaddr)
805 return;
807 switch (dev->devtype->product) {
808 case CODA_HX4:
809 dbk_bits = CODA7_USE_HOST_DBK_ENABLE;
810 bit_bits = CODA7_USE_HOST_BIT_ENABLE;
811 ip_bits = CODA7_USE_HOST_IP_ENABLE;
812 me_bits = CODA7_USE_HOST_ME_ENABLE;
813 break;
814 case CODA_7541:
815 dbk_bits = CODA7_USE_HOST_DBK_ENABLE | CODA7_USE_DBK_ENABLE;
816 bit_bits = CODA7_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
817 ip_bits = CODA7_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
818 me_bits = CODA7_USE_HOST_ME_ENABLE | CODA7_USE_ME_ENABLE;
819 break;
820 case CODA_960:
821 dbk_bits = CODA9_USE_HOST_DBK_ENABLE | CODA9_USE_DBK_ENABLE;
822 bit_bits = CODA9_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
823 ip_bits = CODA9_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
824 me_bits = 0;
825 break;
826 default: /* CODA_DX6 */
827 return;
830 if (ctx->inst_type == CODA_INST_ENCODER) {
831 struct coda_q_data *q_data_src;
833 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
834 mb_width = DIV_ROUND_UP(q_data_src->rect.width, 16);
835 w128 = mb_width * 128;
836 w64 = mb_width * 64;
838 /* Prioritize in case IRAM is too small for everything */
839 if (dev->devtype->product == CODA_HX4 ||
840 dev->devtype->product == CODA_7541) {
841 iram_info->search_ram_size = round_up(mb_width * 16 *
842 36 + 2048, 1024);
843 iram_info->search_ram_paddr = coda_iram_alloc(iram_info,
844 iram_info->search_ram_size);
845 if (!iram_info->search_ram_paddr) {
846 pr_err("IRAM is smaller than the search ram size\n");
847 goto out;
849 iram_info->axi_sram_use |= me_bits;
852 /* Only H.264BP and H.263P3 are considered */
853 iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
854 iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
855 if (!iram_info->buf_dbk_c_use)
856 goto out;
857 iram_info->axi_sram_use |= dbk_bits;
859 iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
860 if (!iram_info->buf_bit_use)
861 goto out;
862 iram_info->axi_sram_use |= bit_bits;
864 iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
865 if (!iram_info->buf_ip_ac_dc_use)
866 goto out;
867 iram_info->axi_sram_use |= ip_bits;
869 /* OVL and BTP disabled for encoder */
870 } else if (ctx->inst_type == CODA_INST_DECODER) {
871 struct coda_q_data *q_data_dst;
873 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
874 mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
875 w128 = mb_width * 128;
877 iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
878 iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
879 if (!iram_info->buf_dbk_c_use)
880 goto out;
881 iram_info->axi_sram_use |= dbk_bits;
883 iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
884 if (!iram_info->buf_bit_use)
885 goto out;
886 iram_info->axi_sram_use |= bit_bits;
888 iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
889 if (!iram_info->buf_ip_ac_dc_use)
890 goto out;
891 iram_info->axi_sram_use |= ip_bits;
893 /* OVL and BTP unused as there is no VC1 support yet */
896 out:
897 if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
898 coda_dbg(1, ctx, "IRAM smaller than needed\n");
900 if (dev->devtype->product == CODA_HX4 ||
901 dev->devtype->product == CODA_7541) {
902 /* TODO - Enabling these causes picture errors on CODA7541 */
903 if (ctx->inst_type == CODA_INST_DECODER) {
904 /* fw 1.4.50 */
905 iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
906 CODA7_USE_IP_ENABLE);
907 } else {
908 /* fw 13.4.29 */
909 iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
910 CODA7_USE_HOST_DBK_ENABLE |
911 CODA7_USE_IP_ENABLE |
912 CODA7_USE_DBK_ENABLE);
917 static u32 coda_supported_firmwares[] = {
918 CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
919 CODA_FIRMWARE_VERNUM(CODA_HX4, 1, 4, 50),
920 CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
921 CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
922 CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 9),
923 CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
924 CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
927 static bool coda_firmware_supported(u32 vernum)
929 int i;
931 for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
932 if (vernum == coda_supported_firmwares[i])
933 return true;
934 return false;
937 int coda_check_firmware(struct coda_dev *dev)
939 u16 product, major, minor, release;
940 u32 data;
941 int ret;
943 ret = clk_prepare_enable(dev->clk_per);
944 if (ret)
945 goto err_clk_per;
947 ret = clk_prepare_enable(dev->clk_ahb);
948 if (ret)
949 goto err_clk_ahb;
951 coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
952 coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
953 coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
954 coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
955 coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
956 if (coda_wait_timeout(dev)) {
957 v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
958 ret = -EIO;
959 goto err_run_cmd;
962 if (dev->devtype->product == CODA_960) {
963 data = coda_read(dev, CODA9_CMD_FIRMWARE_CODE_REV);
964 v4l2_info(&dev->v4l2_dev, "Firmware code revision: %d\n",
965 data);
968 /* Check we are compatible with the loaded firmware */
969 data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
970 product = CODA_FIRMWARE_PRODUCT(data);
971 major = CODA_FIRMWARE_MAJOR(data);
972 minor = CODA_FIRMWARE_MINOR(data);
973 release = CODA_FIRMWARE_RELEASE(data);
975 clk_disable_unprepare(dev->clk_per);
976 clk_disable_unprepare(dev->clk_ahb);
978 if (product != dev->devtype->product) {
979 v4l2_err(&dev->v4l2_dev,
980 "Wrong firmware. Hw: %s, Fw: %s, Version: %u.%u.%u\n",
981 coda_product_name(dev->devtype->product),
982 coda_product_name(product), major, minor, release);
983 return -EINVAL;
986 v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
987 coda_product_name(product));
989 if (coda_firmware_supported(data)) {
990 v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
991 major, minor, release);
992 } else {
993 v4l2_warn(&dev->v4l2_dev,
994 "Unsupported firmware version: %u.%u.%u\n",
995 major, minor, release);
998 return 0;
1000 err_run_cmd:
1001 clk_disable_unprepare(dev->clk_ahb);
1002 err_clk_ahb:
1003 clk_disable_unprepare(dev->clk_per);
1004 err_clk_per:
1005 return ret;
1008 static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
1010 u32 cache_size, cache_config;
1012 if (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) {
1013 /* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
1014 cache_size = 0x20262024;
1015 cache_config = 2 << CODA9_CACHE_PAGEMERGE_OFFSET;
1016 } else {
1017 /* Luma 0x2 page, 4x4 cache, chroma 0x2 page, 4x3 cache size */
1018 cache_size = 0x02440243;
1019 cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
1021 coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
1022 if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_YUYV) {
1023 cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
1024 16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
1025 0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
1026 } else {
1027 cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
1028 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
1029 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
1031 coda_write(ctx->dev, cache_config, CODA9_CMD_SET_FRAME_CACHE_CONFIG);
1035 * Encoder context operations
1038 static int coda_encoder_reqbufs(struct coda_ctx *ctx,
1039 struct v4l2_requestbuffers *rb)
1041 struct coda_q_data *q_data_src;
1042 int ret;
1044 if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1045 return 0;
1047 if (rb->count) {
1048 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1049 ret = coda_alloc_context_buffers(ctx, q_data_src);
1050 if (ret < 0)
1051 return ret;
1052 } else {
1053 coda_free_context_buffers(ctx);
1056 return 0;
1059 static int coda_start_encoding(struct coda_ctx *ctx)
1061 struct coda_dev *dev = ctx->dev;
1062 struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
1063 struct coda_q_data *q_data_src, *q_data_dst;
1064 u32 bitstream_buf, bitstream_size;
1065 struct vb2_v4l2_buffer *buf;
1066 int gamma, ret, value;
1067 u32 dst_fourcc;
1068 int num_fb;
1069 u32 stride;
1071 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1072 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1073 dst_fourcc = q_data_dst->fourcc;
1075 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1076 bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
1077 bitstream_size = q_data_dst->sizeimage;
1079 if (!coda_is_initialized(dev)) {
1080 v4l2_err(v4l2_dev, "coda is not initialized.\n");
1081 return -EFAULT;
1084 if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
1085 if (!ctx->params.jpeg_qmat_tab[0])
1086 ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
1087 if (!ctx->params.jpeg_qmat_tab[1])
1088 ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
1089 coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
1092 mutex_lock(&dev->coda_mutex);
1094 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
1095 coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
1096 coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
1097 switch (dev->devtype->product) {
1098 case CODA_DX6:
1099 coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
1100 CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
1101 break;
1102 case CODA_960:
1103 coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
1104 /* fallthrough */
1105 case CODA_HX4:
1106 case CODA_7541:
1107 coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
1108 CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
1109 break;
1112 ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
1113 CODA9_FRAME_TILED2LINEAR);
1114 if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
1115 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
1116 if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
1117 ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
1118 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
1120 if (dev->devtype->product == CODA_DX6) {
1121 /* Configure the coda */
1122 coda_write(dev, dev->iram.paddr,
1123 CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
1126 /* Could set rotation here if needed */
1127 value = 0;
1128 switch (dev->devtype->product) {
1129 case CODA_DX6:
1130 value = (q_data_src->rect.width & CODADX6_PICWIDTH_MASK)
1131 << CODADX6_PICWIDTH_OFFSET;
1132 value |= (q_data_src->rect.height & CODADX6_PICHEIGHT_MASK)
1133 << CODA_PICHEIGHT_OFFSET;
1134 break;
1135 case CODA_HX4:
1136 case CODA_7541:
1137 if (dst_fourcc == V4L2_PIX_FMT_H264) {
1138 value = (round_up(q_data_src->rect.width, 16) &
1139 CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
1140 value |= (round_up(q_data_src->rect.height, 16) &
1141 CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
1142 break;
1144 /* fallthrough */
1145 case CODA_960:
1146 value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
1147 << CODA7_PICWIDTH_OFFSET;
1148 value |= (q_data_src->rect.height & CODA7_PICHEIGHT_MASK)
1149 << CODA_PICHEIGHT_OFFSET;
1151 coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
1152 if (dst_fourcc == V4L2_PIX_FMT_JPEG)
1153 ctx->params.framerate = 0;
1154 coda_write(dev, ctx->params.framerate,
1155 CODA_CMD_ENC_SEQ_SRC_F_RATE);
1157 ctx->params.codec_mode = ctx->codec->mode;
1158 switch (dst_fourcc) {
1159 case V4L2_PIX_FMT_MPEG4:
1160 if (dev->devtype->product == CODA_960)
1161 coda_write(dev, CODA9_STD_MPEG4,
1162 CODA_CMD_ENC_SEQ_COD_STD);
1163 else
1164 coda_write(dev, CODA_STD_MPEG4,
1165 CODA_CMD_ENC_SEQ_COD_STD);
1166 coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
1167 break;
1168 case V4L2_PIX_FMT_H264:
1169 if (dev->devtype->product == CODA_960)
1170 coda_write(dev, CODA9_STD_H264,
1171 CODA_CMD_ENC_SEQ_COD_STD);
1172 else
1173 coda_write(dev, CODA_STD_H264,
1174 CODA_CMD_ENC_SEQ_COD_STD);
1175 value = ((ctx->params.h264_disable_deblocking_filter_idc &
1176 CODA_264PARAM_DISABLEDEBLK_MASK) <<
1177 CODA_264PARAM_DISABLEDEBLK_OFFSET) |
1178 ((ctx->params.h264_slice_alpha_c0_offset_div2 &
1179 CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
1180 CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
1181 ((ctx->params.h264_slice_beta_offset_div2 &
1182 CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
1183 CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) |
1184 (ctx->params.h264_constrained_intra_pred_flag <<
1185 CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) |
1186 (ctx->params.h264_chroma_qp_index_offset &
1187 CODA_264PARAM_CHROMAQPOFFSET_MASK);
1188 coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
1189 break;
1190 case V4L2_PIX_FMT_JPEG:
1191 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_PARA);
1192 coda_write(dev, ctx->params.jpeg_restart_interval,
1193 CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL);
1194 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_EN);
1195 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE);
1196 coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET);
1198 coda_jpeg_write_tables(ctx);
1199 break;
1200 default:
1201 v4l2_err(v4l2_dev,
1202 "dst format (0x%08x) invalid.\n", dst_fourcc);
1203 ret = -EINVAL;
1204 goto out;
1208 * slice mode and GOP size registers are used for thumb size/offset
1209 * in JPEG mode
1211 if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
1212 value = coda_slice_mode(ctx);
1213 coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
1214 value = ctx->params.gop_size;
1215 coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
1218 if (ctx->params.bitrate) {
1219 ctx->params.bitrate_changed = false;
1220 ctx->params.h264_intra_qp_changed = false;
1222 /* Rate control enabled */
1223 value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
1224 << CODA_RATECONTROL_BITRATE_OFFSET;
1225 value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
1226 value |= (ctx->params.vbv_delay &
1227 CODA_RATECONTROL_INITIALDELAY_MASK)
1228 << CODA_RATECONTROL_INITIALDELAY_OFFSET;
1229 if (dev->devtype->product == CODA_960)
1230 value |= BIT(31); /* disable autoskip */
1231 } else {
1232 value = 0;
1234 coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
1236 coda_write(dev, ctx->params.vbv_size, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
1237 coda_write(dev, ctx->params.intra_refresh,
1238 CODA_CMD_ENC_SEQ_INTRA_REFRESH);
1240 coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
1241 coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
1244 value = 0;
1245 if (dev->devtype->product == CODA_960)
1246 gamma = CODA9_DEFAULT_GAMMA;
1247 else
1248 gamma = CODA_DEFAULT_GAMMA;
1249 if (gamma > 0) {
1250 coda_write(dev, (gamma & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET,
1251 CODA_CMD_ENC_SEQ_RC_GAMMA);
1254 if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) {
1255 coda_write(dev,
1256 ctx->params.h264_min_qp << CODA_QPMIN_OFFSET |
1257 ctx->params.h264_max_qp << CODA_QPMAX_OFFSET,
1258 CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX);
1260 if (dev->devtype->product == CODA_960) {
1261 if (ctx->params.h264_max_qp)
1262 value |= 1 << CODA9_OPTION_RCQPMAX_OFFSET;
1263 if (CODA_DEFAULT_GAMMA > 0)
1264 value |= 1 << CODA9_OPTION_GAMMA_OFFSET;
1265 } else {
1266 if (CODA_DEFAULT_GAMMA > 0) {
1267 if (dev->devtype->product == CODA_DX6)
1268 value |= 1 << CODADX6_OPTION_GAMMA_OFFSET;
1269 else
1270 value |= 1 << CODA7_OPTION_GAMMA_OFFSET;
1272 if (ctx->params.h264_min_qp)
1273 value |= 1 << CODA7_OPTION_RCQPMIN_OFFSET;
1274 if (ctx->params.h264_max_qp)
1275 value |= 1 << CODA7_OPTION_RCQPMAX_OFFSET;
1277 coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
1279 coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
1281 coda_setup_iram(ctx);
1283 if (dst_fourcc == V4L2_PIX_FMT_H264) {
1284 switch (dev->devtype->product) {
1285 case CODA_DX6:
1286 value = FMO_SLICE_SAVE_BUF_SIZE << 7;
1287 coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
1288 break;
1289 case CODA_HX4:
1290 case CODA_7541:
1291 coda_write(dev, ctx->iram_info.search_ram_paddr,
1292 CODA7_CMD_ENC_SEQ_SEARCH_BASE);
1293 coda_write(dev, ctx->iram_info.search_ram_size,
1294 CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
1295 break;
1296 case CODA_960:
1297 coda_write(dev, 0, CODA9_CMD_ENC_SEQ_ME_OPTION);
1298 coda_write(dev, 0, CODA9_CMD_ENC_SEQ_INTRA_WEIGHT);
1302 ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
1303 if (ret < 0) {
1304 v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
1305 goto out;
1308 if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
1309 v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
1310 ret = -EFAULT;
1311 goto out;
1313 ctx->initialized = 1;
1315 if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
1316 if (dev->devtype->product == CODA_960)
1317 ctx->num_internal_frames = 4;
1318 else
1319 ctx->num_internal_frames = 2;
1320 ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
1321 if (ret < 0) {
1322 v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
1323 goto out;
1325 num_fb = 2;
1326 stride = q_data_src->bytesperline;
1327 } else {
1328 ctx->num_internal_frames = 0;
1329 num_fb = 0;
1330 stride = 0;
1332 coda_write(dev, num_fb, CODA_CMD_SET_FRAME_BUF_NUM);
1333 coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
1335 if (dev->devtype->product == CODA_HX4 ||
1336 dev->devtype->product == CODA_7541) {
1337 coda_write(dev, q_data_src->bytesperline,
1338 CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
1340 if (dev->devtype->product != CODA_DX6) {
1341 coda_write(dev, ctx->iram_info.buf_bit_use,
1342 CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
1343 coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
1344 CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
1345 coda_write(dev, ctx->iram_info.buf_dbk_y_use,
1346 CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
1347 coda_write(dev, ctx->iram_info.buf_dbk_c_use,
1348 CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
1349 coda_write(dev, ctx->iram_info.buf_ovl_use,
1350 CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
1351 if (dev->devtype->product == CODA_960) {
1352 coda_write(dev, ctx->iram_info.buf_btp_use,
1353 CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
1355 coda9_set_frame_cache(ctx, q_data_src->fourcc);
1357 /* FIXME */
1358 coda_write(dev, ctx->internal_frames[2].buf.paddr,
1359 CODA9_CMD_SET_FRAME_SUBSAMP_A);
1360 coda_write(dev, ctx->internal_frames[3].buf.paddr,
1361 CODA9_CMD_SET_FRAME_SUBSAMP_B);
1365 ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
1366 if (ret < 0) {
1367 v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
1368 goto out;
1371 coda_dbg(1, ctx, "start encoding %dx%d %4.4s->%4.4s @ %d/%d Hz\n",
1372 q_data_src->rect.width, q_data_src->rect.height,
1373 (char *)&ctx->codec->src_fourcc, (char *)&dst_fourcc,
1374 ctx->params.framerate & 0xffff,
1375 (ctx->params.framerate >> 16) + 1);
1377 /* Save stream headers */
1378 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1379 switch (dst_fourcc) {
1380 case V4L2_PIX_FMT_H264:
1382 * Get SPS in the first frame and copy it to an
1383 * intermediate buffer.
1385 ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
1386 &ctx->vpu_header[0][0],
1387 &ctx->vpu_header_size[0]);
1388 if (ret < 0)
1389 goto out;
1392 * If visible width or height are not aligned to macroblock
1393 * size, the crop_right and crop_bottom SPS fields must be set
1394 * to the difference between visible and coded size. This is
1395 * only supported by CODA960 firmware. All others do not allow
1396 * writing frame cropping parameters, so we have to manually
1397 * fix up the SPS RBSP (Sequence Parameter Set Raw Byte
1398 * Sequence Payload) ourselves.
1400 if (ctx->dev->devtype->product != CODA_960 &&
1401 ((q_data_src->rect.width % 16) ||
1402 (q_data_src->rect.height % 16))) {
1403 ret = coda_h264_sps_fixup(ctx, q_data_src->rect.width,
1404 q_data_src->rect.height,
1405 &ctx->vpu_header[0][0],
1406 &ctx->vpu_header_size[0],
1407 sizeof(ctx->vpu_header[0]));
1408 if (ret < 0)
1409 goto out;
1413 * Get PPS in the first frame and copy it to an
1414 * intermediate buffer.
1416 ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
1417 &ctx->vpu_header[1][0],
1418 &ctx->vpu_header_size[1]);
1419 if (ret < 0)
1420 goto out;
1423 * Length of H.264 headers is variable and thus it might not be
1424 * aligned for the coda to append the encoded frame. In that is
1425 * the case a filler NAL must be added to header 2.
1427 ctx->vpu_header_size[2] = coda_h264_padding(
1428 (ctx->vpu_header_size[0] +
1429 ctx->vpu_header_size[1]),
1430 ctx->vpu_header[2]);
1431 break;
1432 case V4L2_PIX_FMT_MPEG4:
1434 * Get VOS in the first frame and copy it to an
1435 * intermediate buffer
1437 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
1438 &ctx->vpu_header[0][0],
1439 &ctx->vpu_header_size[0]);
1440 if (ret < 0)
1441 goto out;
1443 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
1444 &ctx->vpu_header[1][0],
1445 &ctx->vpu_header_size[1]);
1446 if (ret < 0)
1447 goto out;
1449 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
1450 &ctx->vpu_header[2][0],
1451 &ctx->vpu_header_size[2]);
1452 if (ret < 0)
1453 goto out;
1454 break;
1455 default:
1456 /* No more formats need to save headers at the moment */
1457 break;
1460 out:
1461 mutex_unlock(&dev->coda_mutex);
1462 return ret;
1465 static int coda_prepare_encode(struct coda_ctx *ctx)
1467 struct coda_q_data *q_data_src, *q_data_dst;
1468 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1469 struct coda_dev *dev = ctx->dev;
1470 int force_ipicture;
1471 int quant_param = 0;
1472 u32 pic_stream_buffer_addr, pic_stream_buffer_size;
1473 u32 rot_mode = 0;
1474 u32 dst_fourcc;
1475 u32 reg;
1476 int ret;
1478 ret = coda_enc_param_change(ctx);
1479 if (ret < 0) {
1480 v4l2_warn(&ctx->dev->v4l2_dev, "parameter change failed: %d\n",
1481 ret);
1484 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1485 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1486 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1487 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1488 dst_fourcc = q_data_dst->fourcc;
1490 src_buf->sequence = ctx->osequence;
1491 dst_buf->sequence = ctx->osequence;
1492 ctx->osequence++;
1494 force_ipicture = ctx->params.force_ipicture;
1495 if (force_ipicture)
1496 ctx->params.force_ipicture = false;
1497 else if (ctx->params.gop_size != 0 &&
1498 (src_buf->sequence % ctx->params.gop_size) == 0)
1499 force_ipicture = 1;
1502 * Workaround coda firmware BUG that only marks the first
1503 * frame as IDR. This is a problem for some decoders that can't
1504 * recover when a frame is lost.
1506 if (!force_ipicture) {
1507 src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
1508 src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
1509 } else {
1510 src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
1511 src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
1514 if (dev->devtype->product == CODA_960)
1515 coda_set_gdi_regs(ctx);
1518 * Copy headers in front of the first frame and forced I frames for
1519 * H.264 only. In MPEG4 they are already copied by the CODA.
1521 if (src_buf->sequence == 0 || force_ipicture) {
1522 pic_stream_buffer_addr =
1523 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
1524 ctx->vpu_header_size[0] +
1525 ctx->vpu_header_size[1] +
1526 ctx->vpu_header_size[2];
1527 pic_stream_buffer_size = q_data_dst->sizeimage -
1528 ctx->vpu_header_size[0] -
1529 ctx->vpu_header_size[1] -
1530 ctx->vpu_header_size[2];
1531 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
1532 &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
1533 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
1534 + ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
1535 ctx->vpu_header_size[1]);
1536 memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
1537 + ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
1538 &ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
1539 } else {
1540 pic_stream_buffer_addr =
1541 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1542 pic_stream_buffer_size = q_data_dst->sizeimage;
1545 if (force_ipicture) {
1546 switch (dst_fourcc) {
1547 case V4L2_PIX_FMT_H264:
1548 quant_param = ctx->params.h264_intra_qp;
1549 break;
1550 case V4L2_PIX_FMT_MPEG4:
1551 quant_param = ctx->params.mpeg4_intra_qp;
1552 break;
1553 case V4L2_PIX_FMT_JPEG:
1554 quant_param = 30;
1555 break;
1556 default:
1557 v4l2_warn(&ctx->dev->v4l2_dev,
1558 "cannot set intra qp, fmt not supported\n");
1559 break;
1561 } else {
1562 switch (dst_fourcc) {
1563 case V4L2_PIX_FMT_H264:
1564 quant_param = ctx->params.h264_inter_qp;
1565 break;
1566 case V4L2_PIX_FMT_MPEG4:
1567 quant_param = ctx->params.mpeg4_inter_qp;
1568 break;
1569 default:
1570 v4l2_warn(&ctx->dev->v4l2_dev,
1571 "cannot set inter qp, fmt not supported\n");
1572 break;
1576 /* submit */
1577 if (ctx->params.rot_mode)
1578 rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
1579 coda_write(dev, rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
1580 coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
1582 if (dev->devtype->product == CODA_960) {
1583 coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
1584 coda_write(dev, q_data_src->bytesperline,
1585 CODA9_CMD_ENC_PIC_SRC_STRIDE);
1586 coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
1588 reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
1589 } else {
1590 reg = CODA_CMD_ENC_PIC_SRC_ADDR_Y;
1592 coda_write_base(ctx, q_data_src, src_buf, reg);
1594 coda_write(dev, force_ipicture << 1 & 0x2,
1595 CODA_CMD_ENC_PIC_OPTION);
1597 coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
1598 coda_write(dev, pic_stream_buffer_size / 1024,
1599 CODA_CMD_ENC_PIC_BB_SIZE);
1601 if (!ctx->streamon_out) {
1602 /* After streamoff on the output side, set stream end flag */
1603 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
1604 coda_write(dev, ctx->bit_stream_param,
1605 CODA_REG_BIT_BIT_STREAM_PARAM);
1608 if (dev->devtype->product != CODA_DX6)
1609 coda_write(dev, ctx->iram_info.axi_sram_use,
1610 CODA7_REG_BIT_AXI_SRAM_USE);
1612 trace_coda_enc_pic_run(ctx, src_buf);
1614 coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
1616 return 0;
1619 static char coda_frame_type_char(u32 flags)
1621 return (flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' :
1622 (flags & V4L2_BUF_FLAG_PFRAME) ? 'P' :
1623 (flags & V4L2_BUF_FLAG_BFRAME) ? 'B' : '?';
1626 static void coda_finish_encode(struct coda_ctx *ctx)
1628 struct vb2_v4l2_buffer *src_buf, *dst_buf;
1629 struct coda_dev *dev = ctx->dev;
1630 u32 wr_ptr, start_ptr;
1632 if (ctx->aborting)
1633 return;
1636 * Lock to make sure that an encoder stop command running in parallel
1637 * will either already have marked src_buf as last, or it will wake up
1638 * the capture queue after the buffers are returned.
1640 mutex_lock(&ctx->wakeup_mutex);
1641 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1642 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1644 trace_coda_enc_pic_done(ctx, dst_buf);
1646 /* Get results from the coda */
1647 start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
1648 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
1650 /* Calculate bytesused field */
1651 if (dst_buf->sequence == 0 ||
1652 src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
1653 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
1654 ctx->vpu_header_size[0] +
1655 ctx->vpu_header_size[1] +
1656 ctx->vpu_header_size[2]);
1657 } else {
1658 vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
1661 coda_dbg(1, ctx, "frame size = %u\n", wr_ptr - start_ptr);
1663 coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
1664 coda_read(dev, CODA_RET_ENC_PIC_FLAG);
1666 dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
1667 V4L2_BUF_FLAG_PFRAME |
1668 V4L2_BUF_FLAG_LAST);
1669 if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0)
1670 dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
1671 else
1672 dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
1673 dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
1675 v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
1677 v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
1679 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1680 coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
1681 mutex_unlock(&ctx->wakeup_mutex);
1683 ctx->gopcounter--;
1684 if (ctx->gopcounter < 0)
1685 ctx->gopcounter = ctx->params.gop_size - 1;
1687 coda_dbg(1, ctx, "job finished: encoded %c frame (%d)%s\n",
1688 coda_frame_type_char(dst_buf->flags), dst_buf->sequence,
1689 (dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
1692 static void coda_seq_end_work(struct work_struct *work)
1694 struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
1695 struct coda_dev *dev = ctx->dev;
1697 mutex_lock(&ctx->buffer_mutex);
1698 mutex_lock(&dev->coda_mutex);
1700 if (ctx->initialized == 0)
1701 goto out;
1703 coda_dbg(1, ctx, "%s: sent command 'SEQ_END' to coda\n", __func__);
1704 if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
1705 v4l2_err(&dev->v4l2_dev,
1706 "CODA_COMMAND_SEQ_END failed\n");
1710 * FIXME: Sometimes h.264 encoding fails with 8-byte sequences missing
1711 * from the output stream after the h.264 decoder has run. Resetting the
1712 * hardware after the decoder has finished seems to help.
1714 if (dev->devtype->product == CODA_960)
1715 coda_hw_reset(ctx);
1717 kfifo_init(&ctx->bitstream_fifo,
1718 ctx->bitstream.vaddr, ctx->bitstream.size);
1720 coda_free_framebuffers(ctx);
1722 ctx->initialized = 0;
1724 out:
1725 mutex_unlock(&dev->coda_mutex);
1726 mutex_unlock(&ctx->buffer_mutex);
1729 static void coda_bit_release(struct coda_ctx *ctx)
1731 mutex_lock(&ctx->buffer_mutex);
1732 coda_free_framebuffers(ctx);
1733 coda_free_context_buffers(ctx);
1734 coda_free_bitstream_buffer(ctx);
1735 mutex_unlock(&ctx->buffer_mutex);
1738 const struct coda_context_ops coda_bit_encode_ops = {
1739 .queue_init = coda_encoder_queue_init,
1740 .reqbufs = coda_encoder_reqbufs,
1741 .start_streaming = coda_start_encoding,
1742 .prepare_run = coda_prepare_encode,
1743 .finish_run = coda_finish_encode,
1744 .seq_end_work = coda_seq_end_work,
1745 .release = coda_bit_release,
1749 * Decoder context operations
1752 static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
1753 struct coda_q_data *q_data)
1755 if (ctx->bitstream.vaddr)
1756 return 0;
1758 ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
1759 ctx->bitstream.vaddr = dma_alloc_wc(ctx->dev->dev, ctx->bitstream.size,
1760 &ctx->bitstream.paddr, GFP_KERNEL);
1761 if (!ctx->bitstream.vaddr) {
1762 v4l2_err(&ctx->dev->v4l2_dev,
1763 "failed to allocate bitstream ringbuffer");
1764 return -ENOMEM;
1766 kfifo_init(&ctx->bitstream_fifo,
1767 ctx->bitstream.vaddr, ctx->bitstream.size);
1769 return 0;
1772 static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
1774 if (ctx->bitstream.vaddr == NULL)
1775 return;
1777 dma_free_wc(ctx->dev->dev, ctx->bitstream.size, ctx->bitstream.vaddr,
1778 ctx->bitstream.paddr);
1779 ctx->bitstream.vaddr = NULL;
1780 kfifo_init(&ctx->bitstream_fifo, NULL, 0);
1783 static int coda_decoder_reqbufs(struct coda_ctx *ctx,
1784 struct v4l2_requestbuffers *rb)
1786 struct coda_q_data *q_data_src;
1787 int ret;
1789 if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1790 return 0;
1792 if (rb->count) {
1793 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1794 ret = coda_alloc_context_buffers(ctx, q_data_src);
1795 if (ret < 0)
1796 return ret;
1797 ret = coda_alloc_bitstream_buffer(ctx, q_data_src);
1798 if (ret < 0) {
1799 coda_free_context_buffers(ctx);
1800 return ret;
1802 } else {
1803 coda_free_bitstream_buffer(ctx);
1804 coda_free_context_buffers(ctx);
1807 return 0;
1810 static bool coda_reorder_enable(struct coda_ctx *ctx)
1812 struct coda_dev *dev = ctx->dev;
1813 int profile;
1815 if (dev->devtype->product != CODA_HX4 &&
1816 dev->devtype->product != CODA_7541 &&
1817 dev->devtype->product != CODA_960)
1818 return false;
1820 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
1821 return false;
1823 if (ctx->codec->src_fourcc != V4L2_PIX_FMT_H264)
1824 return true;
1826 profile = coda_h264_profile(ctx->params.h264_profile_idc);
1827 if (profile < 0)
1828 v4l2_warn(&dev->v4l2_dev, "Unknown H264 Profile: %u\n",
1829 ctx->params.h264_profile_idc);
1831 /* Baseline profile does not support reordering */
1832 return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
1835 static int __coda_decoder_seq_init(struct coda_ctx *ctx)
1837 struct coda_q_data *q_data_src, *q_data_dst;
1838 u32 bitstream_buf, bitstream_size;
1839 struct coda_dev *dev = ctx->dev;
1840 int width, height;
1841 u32 src_fourcc, dst_fourcc;
1842 u32 val;
1843 int ret;
1845 lockdep_assert_held(&dev->coda_mutex);
1847 coda_dbg(1, ctx, "Video Data Order Adapter: %s\n",
1848 ctx->use_vdoa ? "Enabled" : "Disabled");
1850 /* Start decoding */
1851 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
1852 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
1853 bitstream_buf = ctx->bitstream.paddr;
1854 bitstream_size = ctx->bitstream.size;
1855 src_fourcc = q_data_src->fourcc;
1856 dst_fourcc = q_data_dst->fourcc;
1858 /* Update coda bitstream read and write pointers from kfifo */
1859 coda_kfifo_sync_to_device_full(ctx);
1861 ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
1862 CODA9_FRAME_TILED2LINEAR);
1863 if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
1864 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
1865 if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
1866 ctx->frame_mem_ctrl |= (0x3 << 9) |
1867 ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
1868 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
1870 ctx->display_idx = -1;
1871 ctx->frm_dis_flg = 0;
1872 coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
1874 coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
1875 coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
1876 val = 0;
1877 if (coda_reorder_enable(ctx))
1878 val |= CODA_REORDER_ENABLE;
1879 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
1880 val |= CODA_NO_INT_ENABLE;
1881 coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
1883 ctx->params.codec_mode = ctx->codec->mode;
1884 if (dev->devtype->product == CODA_960 &&
1885 src_fourcc == V4L2_PIX_FMT_MPEG4)
1886 ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
1887 else
1888 ctx->params.codec_mode_aux = 0;
1889 if (src_fourcc == V4L2_PIX_FMT_MPEG4) {
1890 coda_write(dev, CODA_MP4_CLASS_MPEG4,
1891 CODA_CMD_DEC_SEQ_MP4_ASP_CLASS);
1893 if (src_fourcc == V4L2_PIX_FMT_H264) {
1894 if (dev->devtype->product == CODA_HX4 ||
1895 dev->devtype->product == CODA_7541) {
1896 coda_write(dev, ctx->psbuf.paddr,
1897 CODA_CMD_DEC_SEQ_PS_BB_START);
1898 coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
1899 CODA_CMD_DEC_SEQ_PS_BB_SIZE);
1901 if (dev->devtype->product == CODA_960) {
1902 coda_write(dev, 0, CODA_CMD_DEC_SEQ_X264_MV_EN);
1903 coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
1906 if (src_fourcc == V4L2_PIX_FMT_JPEG)
1907 coda_write(dev, 0, CODA_CMD_DEC_SEQ_JPG_THUMB_EN);
1908 if (dev->devtype->product != CODA_960)
1909 coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
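/*
 * Run SEQ_INIT in escape mode; this presumably allows the firmware to
 * return instead of stalling when the bitstream does not yet contain a
 * complete sequence header. The flag is cleared again right after the
 * command (assumption, not confirmed here).
 */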
1911 ctx->bit_stream_param = CODA_BIT_DEC_SEQ_INIT_ESCAPE;
1912 ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
1913 ctx->bit_stream_param = 0;
1914 if (ret) {
1915 v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
1916 return ret;
1917 }
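/*
 * ~0U marks the sequence offset as not yet known; coda_finish_decode()
 * latches it from the first CODA_RET_DEC_PIC_FRAME_NUM it reads (see the
 * sequence_offset == -1 check there).
 */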
1918 ctx->sequence_offset = ~0U;
1919 ctx->initialized = 1;
1921 /* Update kfifo out pointer from coda bitstream read pointer */
1922 coda_kfifo_sync_from_device(ctx);
1924 if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
1925 v4l2_err(&dev->v4l2_dev,
1926 "CODA_COMMAND_SEQ_INIT failed, error code = 0x%x\n",
1927 coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
1928 return -EAGAIN;
1929 }
1931 val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
1932 if (dev->devtype->product == CODA_DX6) {
1933 width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
1934 height = val & CODADX6_PICHEIGHT_MASK;
1935 } else {
1936 width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
1937 height = val & CODA7_PICHEIGHT_MASK;
1938 }
1940 if (width > q_data_dst->bytesperline || height > q_data_dst->height) {
1941 v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
1942 width, height, q_data_dst->bytesperline,
1943 q_data_dst->height);
1944 return -EINVAL;
1945 }
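/* The codec operates on 16x16 macroblocks, so round the internal frame size up to whole macroblocks */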
1947 width = round_up(width, 16);
1948 height = round_up(height, 16);
1950 coda_dbg(1, ctx, "start decoding: %dx%d\n", width, height);
1952 ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
1953 /*
1954 * If the VDOA is used, the decoder needs one additional frame,
1955 * because the frames are freed when the next frame is decoded.
1956 * Otherwise there are visible errors in the decoded frames (green
1957 * regions in displayed frames) and a broken order of frames (earlier
1958 * frames are sporadically displayed after later frames).
1959 */
1960 if (ctx->use_vdoa)
1961 ctx->num_internal_frames += 1;
1962 if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
1963 v4l2_err(&dev->v4l2_dev,
1964 "not enough framebuffers to decode (%d < %d)\n",
1965 CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
1966 return -EINVAL;
1967 }
1969 if (src_fourcc == V4L2_PIX_FMT_H264) {
1970 u32 left_right;
1971 u32 top_bottom;
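/*
 * Each crop register packs two 10-bit pixel offsets: bits 19:10 hold the
 * left/top crop and bits 9:0 the right/bottom crop, relative to the
 * macroblock-aligned coded size.
 */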
1973 left_right = coda_read(dev, CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT);
1974 top_bottom = coda_read(dev, CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM);
1976 q_data_dst->rect.left = (left_right >> 10) & 0x3ff;
1977 q_data_dst->rect.top = (top_bottom >> 10) & 0x3ff;
1978 q_data_dst->rect.width = width - q_data_dst->rect.left -
1979 (left_right & 0x3ff);
1980 q_data_dst->rect.height = height - q_data_dst->rect.top -
1981 (top_bottom & 0x3ff);
1982 }
1984 if (dev->devtype->product != CODA_DX6) {
1985 u8 profile, level;
1987 val = coda_read(dev, CODA7_RET_DEC_SEQ_HEADER_REPORT);
1988 profile = val & 0xff;
1989 level = (val >> 8) & 0x7f;
1991 if (profile || level)
1992 coda_update_profile_level_ctrls(ctx, profile, level);
1993 }
1995 return 0;
1996 }
1998 static void coda_dec_seq_init_work(struct work_struct *work)
1999 {
2000 struct coda_ctx *ctx = container_of(work,
2001 struct coda_ctx, seq_init_work);
2002 struct coda_dev *dev = ctx->dev;
2003 int ret;
2005 mutex_lock(&ctx->buffer_mutex);
2006 mutex_lock(&dev->coda_mutex);
2008 if (ctx->initialized == 1)
2009 goto out;
2011 ret = __coda_decoder_seq_init(ctx);
2012 if (ret < 0)
2013 goto out;
2015 ctx->initialized = 1;
2017 out:
2018 mutex_unlock(&dev->coda_mutex);
2019 mutex_unlock(&ctx->buffer_mutex);
2020 }
2022 static int __coda_start_decoding(struct coda_ctx *ctx)
2023 {
2024 struct coda_q_data *q_data_src, *q_data_dst;
2025 struct coda_dev *dev = ctx->dev;
2026 u32 src_fourcc, dst_fourcc;
2027 int ret;
2029 if (!ctx->initialized) {
2030 ret = __coda_decoder_seq_init(ctx);
2031 if (ret < 0)
2032 return ret;
2033 }
2035 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
2036 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
2037 src_fourcc = q_data_src->fourcc;
2038 dst_fourcc = q_data_dst->fourcc;
2040 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
2042 ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
2043 if (ret < 0) {
2044 v4l2_err(&dev->v4l2_dev, "failed to allocate framebuffers\n");
2045 return ret;
2046 }
2048 /* Tell the decoder how many frame buffers we allocated. */
2049 coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
2050 coda_write(dev, round_up(q_data_dst->rect.width, 16),
2051 CODA_CMD_SET_FRAME_BUF_STRIDE);
2053 if (dev->devtype->product != CODA_DX6) {
2054 /* Set secondary AXI IRAM */
2055 coda_setup_iram(ctx);
2057 coda_write(dev, ctx->iram_info.buf_bit_use,
2058 CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
2059 coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
2060 CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
2061 coda_write(dev, ctx->iram_info.buf_dbk_y_use,
2062 CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
2063 coda_write(dev, ctx->iram_info.buf_dbk_c_use,
2064 CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
2065 coda_write(dev, ctx->iram_info.buf_ovl_use,
2066 CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
2067 if (dev->devtype->product == CODA_960) {
2068 coda_write(dev, ctx->iram_info.buf_btp_use,
2069 CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
2071 coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
2072 coda9_set_frame_cache(ctx, dst_fourcc);
2073 }
2074 }
2076 if (src_fourcc == V4L2_PIX_FMT_H264) {
2077 coda_write(dev, ctx->slicebuf.paddr,
2078 CODA_CMD_SET_FRAME_SLICE_BB_START);
2079 coda_write(dev, ctx->slicebuf.size / 1024,
2080 CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
2081 }
2083 if (dev->devtype->product == CODA_HX4 ||
2084 dev->devtype->product == CODA_7541) {
2085 int max_mb_x = 1920 / 16;
2086 int max_mb_y = 1088 / 16;
2087 int max_mb_num = max_mb_x * max_mb_y;
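/* 1920x1088 is 120x68 macroblocks, 8160 in total, packed as num << 16 | x << 8 | y */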
2089 coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
2090 CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
2091 } else if (dev->devtype->product == CODA_960) {
2092 int max_mb_x = 1920 / 16;
2093 int max_mb_y = 1088 / 16;
2094 int max_mb_num = max_mb_x * max_mb_y;
2096 coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
2097 CODA9_CMD_SET_FRAME_MAX_DEC_SIZE);
2098 }
2100 if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
2101 v4l2_err(&ctx->dev->v4l2_dev,
2102 "CODA_COMMAND_SET_FRAME_BUF timeout\n");
2103 return -ETIMEDOUT;
2104 }
2106 return 0;
2107 }
2109 static int coda_start_decoding(struct coda_ctx *ctx)
2110 {
2111 struct coda_dev *dev = ctx->dev;
2112 int ret;
2114 mutex_lock(&dev->coda_mutex);
2115 ret = __coda_start_decoding(ctx);
2116 mutex_unlock(&dev->coda_mutex);
2118 return ret;
2119 }
2121 static int coda_prepare_decode(struct coda_ctx *ctx)
2122 {
2123 struct vb2_v4l2_buffer *dst_buf;
2124 struct coda_dev *dev = ctx->dev;
2125 struct coda_q_data *q_data_dst;
2126 struct coda_buffer_meta *meta;
2127 u32 rot_mode = 0;
2128 u32 reg_addr, reg_stride;
2130 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
2131 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
2133 /* Try to copy source buffer contents into the bitstream ringbuffer */
2134 mutex_lock(&ctx->bitstream_mutex);
2135 coda_fill_bitstream(ctx, NULL);
2136 mutex_unlock(&ctx->bitstream_mutex);
2138 if (coda_get_bitstream_payload(ctx) < 512 &&
2139 (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
2140 coda_dbg(1, ctx, "bitstream payload: %d, skipping\n",
2141 coda_get_bitstream_payload(ctx));
2142 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
2143 return -EAGAIN;
2144 }
2146 /* Run coda_start_decoding (again) if not yet initialized */
2147 if (!ctx->initialized) {
2148 int ret = __coda_start_decoding(ctx);
2150 if (ret < 0) {
2151 v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
2152 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
2153 return -EAGAIN;
2154 } else {
2155 ctx->initialized = 1;
2156 }
2157 }
2159 if (dev->devtype->product == CODA_960)
2160 coda_set_gdi_regs(ctx);
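/*
 * The selected display frame is copied into the capture buffer either by
 * the VDOA or by the codec's rotator, so program the respective output
 * address and stride below.
 */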
2162 if (ctx->use_vdoa &&
2163 ctx->display_idx >= 0 &&
2164 ctx->display_idx < ctx->num_internal_frames) {
2165 vdoa_device_run(ctx->vdoa,
2166 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0),
2167 ctx->internal_frames[ctx->display_idx].buf.paddr);
2168 } else {
2169 if (dev->devtype->product == CODA_960) {
2170 /*
2171 * It was previously assumed that the CODA960 has an
2172 * internal list of 64 buffer entries that contains
2173 * both the registered internal frame buffers as well
2174 * as the rotator buffer output, and that the ROT_INDEX
2175 * register must be set to a value between the last
2176 * internal frame buffers' index and 64.
2177 * At least on firmware version 3.1.1 it turns out that
2178 * setting ROT_INDEX to any value >= 32 causes CODA
2179 * hangups that it can not recover from with the SRC VPU
2180 * reset.
2181 * It does appear to work however, to just set it to a
2182 * fixed value in the [ctx->num_internal_frames, 31]
2183 * range, for example CODA_MAX_FRAMEBUFFERS.
2184 */
2185 coda_write(dev, CODA_MAX_FRAMEBUFFERS,
2186 CODA9_CMD_DEC_PIC_ROT_INDEX);
2188 reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
2189 reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
2190 } else {
2191 reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
2192 reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
2193 }
2194 coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
2195 coda_write(dev, q_data_dst->bytesperline, reg_stride);
2197 rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
2198 }
2200 coda_write(dev, rot_mode, CODA_CMD_DEC_PIC_ROT_MODE);
2202 switch (dev->devtype->product) {
2203 case CODA_DX6:
2204 /* TBD */
2205 case CODA_HX4:
2206 case CODA_7541:
2207 coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
2208 break;
2209 case CODA_960:
2210 /* 'hardcode to use interrupt disable mode'? */
2211 coda_write(dev, (1 << 10), CODA_CMD_DEC_PIC_OPTION);
2212 break;
2213 }
2215 coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
2217 coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
2218 coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
2220 if (dev->devtype->product != CODA_DX6)
2221 coda_write(dev, ctx->iram_info.axi_sram_use,
2222 CODA7_REG_BIT_AXI_SRAM_USE);
2224 spin_lock(&ctx->buffer_meta_lock);
2225 meta = list_first_entry_or_null(&ctx->buffer_meta_list,
2226 struct coda_buffer_meta, list);
2228 if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
2230 /* If this is the last buffer in the bitstream, add padding */
2231 if (meta->end == ctx->bitstream_fifo.kfifo.in) {
2232 static unsigned char buf[512];
2233 unsigned int pad;
2235 /* Pad to multiple of 256 and then add 256 more */
2236 pad = ((0 - meta->end) & 0xff) + 256;
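/* Example: for meta->end == 0x4142, (0 - 0x4142) & 0xff is 190, so 446 bytes of 0xff are appended */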
2238 memset(buf, 0xff, sizeof(buf));
2240 kfifo_in(&ctx->bitstream_fifo, buf, pad);
2241 }
2242 }
2243 spin_unlock(&ctx->buffer_meta_lock);
2245 coda_kfifo_sync_to_device_full(ctx);
2247 /* Clear decode success flag */
2248 coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
2250 /* Clear error return value */
2251 coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
2253 trace_coda_dec_pic_run(ctx, meta);
2255 coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
2257 return 0;
2258 }
2260 static void coda_finish_decode(struct coda_ctx *ctx)
2261 {
2262 struct coda_dev *dev = ctx->dev;
2263 struct coda_q_data *q_data_src;
2264 struct coda_q_data *q_data_dst;
2265 struct vb2_v4l2_buffer *dst_buf;
2266 struct coda_buffer_meta *meta;
2267 int width, height;
2268 int decoded_idx;
2269 int display_idx;
2270 struct coda_internal_frame *decoded_frame = NULL;
2271 u32 src_fourcc;
2272 int success;
2273 u32 err_mb;
2274 int err_vdoa = 0;
2275 u32 val;
2277 if (ctx->aborting)
2278 return;
2280 /* Update kfifo out pointer from coda bitstream read pointer */
2281 coda_kfifo_sync_from_device(ctx);
2283 /*
2284 * in stream-end mode, the read pointer can overshoot the write pointer
2285 * by up to 512 bytes
2286 */
2287 if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
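/*
 * A payload close to the full buffer size means the read pointer ran past
 * the write pointer; reset the fifo to empty instead of treating the stale
 * contents as payload.
 */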
2288 if (coda_get_bitstream_payload(ctx) >= ctx->bitstream.size - 512)
2289 kfifo_init(&ctx->bitstream_fifo,
2290 ctx->bitstream.vaddr, ctx->bitstream.size);
2291 }
2293 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
2294 src_fourcc = q_data_src->fourcc;
2296 val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
2297 if (val != 1)
2298 pr_err("DEC_PIC_SUCCESS = %d\n", val);
2300 success = val & 0x1;
2301 if (!success)
2302 v4l2_err(&dev->v4l2_dev, "decode failed\n");
2304 if (src_fourcc == V4L2_PIX_FMT_H264) {
2305 if (val & (1 << 3))
2306 v4l2_err(&dev->v4l2_dev,
2307 "insufficient PS buffer space (%d bytes)\n",
2308 ctx->psbuf.size);
2309 if (val & (1 << 2))
2310 v4l2_err(&dev->v4l2_dev,
2311 "insufficient slice buffer space (%d bytes)\n",
2312 ctx->slicebuf.size);
2313 }
2315 val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
2316 width = (val >> 16) & 0xffff;
2317 height = val & 0xffff;
2319 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
2321 /* frame crop information */
2322 if (src_fourcc == V4L2_PIX_FMT_H264) {
2323 u32 left_right;
2324 u32 top_bottom;
2326 left_right = coda_read(dev, CODA_RET_DEC_PIC_CROP_LEFT_RIGHT);
2327 top_bottom = coda_read(dev, CODA_RET_DEC_PIC_CROP_TOP_BOTTOM);
2329 if (left_right == 0xffffffff && top_bottom == 0xffffffff) {
2330 /* Keep current crop information */
2331 } else {
2332 struct v4l2_rect *rect = &q_data_dst->rect;
2334 rect->left = left_right >> 16 & 0xffff;
2335 rect->top = top_bottom >> 16 & 0xffff;
2336 rect->width = width - rect->left -
2337 (left_right & 0xffff);
2338 rect->height = height - rect->top -
2339 (top_bottom & 0xffff);
2340 }
2341 } else {
2342 /* no cropping */
2343 }
2345 err_mb = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
2346 if (err_mb > 0)
2347 v4l2_err(&dev->v4l2_dev,
2348 "errors in %d macroblocks\n", err_mb);
2350 if (dev->devtype->product == CODA_HX4 ||
2351 dev->devtype->product == CODA_7541) {
2352 val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
2353 if (val == 0) {
2354 /* not enough bitstream data */
2355 coda_dbg(1, ctx, "prescan failed: %d\n", val);
2356 ctx->hold = true;
2357 return;
2358 }
2359 }
2361 /* Wait until the VDOA finished writing the previous display frame */
2362 if (ctx->use_vdoa &&
2363 ctx->display_idx >= 0 &&
2364 ctx->display_idx < ctx->num_internal_frames) {
2365 err_vdoa = vdoa_wait_for_completion(ctx->vdoa);
2366 }
2368 ctx->frm_dis_flg = coda_read(dev,
2369 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
2371 /* The previous display frame was copied out and can be overwritten */
2372 if (ctx->display_idx >= 0 &&
2373 ctx->display_idx < ctx->num_internal_frames) {
2374 ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
2375 coda_write(dev, ctx->frm_dis_flg,
2376 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
2377 }
2379 /*
2380 * The index of the last decoded frame, not necessarily in
2381 * display order, and the index of the next display frame.
2382 * The latter could have been decoded in a previous run.
2383 */
2384 decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
2385 display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
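/*
 * Negative indices are firmware status codes, handled below: decoded_idx
 * -1 means nothing was decoded, -2 means nothing was decoded but remaining
 * buffers are still returned; display_idx -1 means there is no frame to
 * display yet, -3 indicates a possible prescan failure.
 */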
2387 if (decoded_idx == -1) {
2388 /* no frame was decoded, but we might have a display frame */
2389 if (display_idx >= 0 && display_idx < ctx->num_internal_frames)
2390 ctx->sequence_offset++;
2391 else if (ctx->display_idx < 0)
2392 ctx->hold = true;
2393 } else if (decoded_idx == -2) {
2394 if (ctx->display_idx >= 0 &&
2395 ctx->display_idx < ctx->num_internal_frames)
2396 ctx->sequence_offset++;
2397 /* no frame was decoded, we still return remaining buffers */
2398 } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
2399 v4l2_err(&dev->v4l2_dev,
2400 "decoded frame index out of range: %d\n", decoded_idx);
2401 } else {
2402 decoded_frame = &ctx->internal_frames[decoded_idx];
2404 val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
2405 if (ctx->sequence_offset == -1)
2406 ctx->sequence_offset = val;
2407 val -= ctx->sequence_offset;
2408 spin_lock(&ctx->buffer_meta_lock);
2409 if (!list_empty(&ctx->buffer_meta_list)) {
2410 meta = list_first_entry(&ctx->buffer_meta_list,
2411 struct coda_buffer_meta, list);
2412 list_del(&meta->list);
2413 ctx->num_metas--;
2414 spin_unlock(&ctx->buffer_meta_lock);
2415 /*
2416 * Clamp counters to 16 bits for comparison, as the HW
2417 * counter rolls over at this point for h.264. This
2418 * may be different for other formats, but using 16 bits
2419 * should be enough to detect most errors and saves us
2420 * from doing different things based on the format.
2421 */
2422 if ((val & 0xffff) != (meta->sequence & 0xffff)) {
2423 v4l2_err(&dev->v4l2_dev,
2424 "sequence number mismatch (%d(%d) != %d)\n",
2425 val, ctx->sequence_offset,
2426 meta->sequence);
2427 }
2428 decoded_frame->meta = *meta;
2429 kfree(meta);
2430 } else {
2431 spin_unlock(&ctx->buffer_meta_lock);
2432 v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
2433 memset(&decoded_frame->meta, 0,
2434 sizeof(struct coda_buffer_meta));
2435 decoded_frame->meta.sequence = val;
2436 decoded_frame->meta.last = false;
2437 ctx->sequence_offset++;
2438 }
2440 trace_coda_dec_pic_done(ctx, &decoded_frame->meta);
2442 val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
2443 decoded_frame->type = (val == 0) ? V4L2_BUF_FLAG_KEYFRAME :
2444 (val == 1) ? V4L2_BUF_FLAG_PFRAME :
2445 V4L2_BUF_FLAG_BFRAME;
2447 decoded_frame->error = err_mb;
2448 }
2450 if (display_idx == -1) {
2451 /*
2452 * no more frames to be decoded, but there could still
2453 * be rotator output to dequeue
2454 */
2455 ctx->hold = true;
2456 } else if (display_idx == -3) {
2457 /* possibly prescan failure */
2458 } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
2459 v4l2_err(&dev->v4l2_dev,
2460 "presentation frame index out of range: %d\n",
2461 display_idx);
2462 }
2464 /* If a frame was copied out, return it */
2465 if (ctx->display_idx >= 0 &&
2466 ctx->display_idx < ctx->num_internal_frames) {
2467 struct coda_internal_frame *ready_frame;
2469 ready_frame = &ctx->internal_frames[ctx->display_idx];
2471 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2472 dst_buf->sequence = ctx->osequence++;
2474 dst_buf->field = V4L2_FIELD_NONE;
2475 dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
2476 V4L2_BUF_FLAG_PFRAME |
2477 V4L2_BUF_FLAG_BFRAME);
2478 dst_buf->flags |= ready_frame->type;
2479 meta = &ready_frame->meta;
2480 if (meta->last && !coda_reorder_enable(ctx)) {
2481 /*
2482 * If this was the last decoded frame, and reordering
2483 * is disabled, this will be the last display frame.
2484 */
2485 coda_dbg(1, ctx, "last meta, marking as last frame\n");
2486 dst_buf->flags |= V4L2_BUF_FLAG_LAST;
2487 } else if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG &&
2488 display_idx == -1) {
2489 /*
2490 * If there is no designated presentation frame anymore,
2491 * this frame has to be the last one.
2492 */
2493 coda_dbg(1, ctx,
2494 "no more frames to return, marking as last frame\n");
2495 dst_buf->flags |= V4L2_BUF_FLAG_LAST;
2496 }
2497 dst_buf->timecode = meta->timecode;
2498 dst_buf->vb2_buf.timestamp = meta->timestamp;
2500 trace_coda_dec_rot_done(ctx, dst_buf, meta);
2502 vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
2503 q_data_dst->sizeimage);
2505 if (ready_frame->error || err_vdoa)
2506 coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
2507 else
2508 coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
2510 if (decoded_frame) {
2511 coda_dbg(1, ctx, "job finished: decoded %c frame %u, returned %c frame %u (%u/%u)%s\n",
2512 coda_frame_type_char(decoded_frame->type),
2513 decoded_frame->meta.sequence,
2514 coda_frame_type_char(dst_buf->flags),
2515 ready_frame->meta.sequence,
2516 dst_buf->sequence, ctx->qsequence,
2517 (dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
2518 " (last)" : "");
2519 } else {
2520 coda_dbg(1, ctx, "job finished: no frame decoded (%d), returned %c frame %u (%u/%u)%s\n",
2521 decoded_idx,
2522 coda_frame_type_char(dst_buf->flags),
2523 ready_frame->meta.sequence,
2524 dst_buf->sequence, ctx->qsequence,
2525 (dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
2526 " (last)" : "");
2527 }
2528 } else {
2529 if (decoded_frame) {
2530 coda_dbg(1, ctx, "job finished: decoded %c frame %u, no frame returned (%d)\n",
2531 coda_frame_type_char(decoded_frame->type),
2532 decoded_frame->meta.sequence,
2533 ctx->display_idx);
2534 } else {
2535 coda_dbg(1, ctx, "job finished: no frame decoded (%d) or returned (%d)\n",
2536 decoded_idx, ctx->display_idx);
2537 }
2538 }
2540 /* The rotator will copy the current display frame next time */
2541 ctx->display_idx = display_idx;
2543 /*
2544 * The current decode run might have brought the bitstream fill level
2545 * below the size where we can start the next decode run. As userspace
2546 * might have filled the output queue completely and might thus be
2547 * blocked, we can't rely on the next qbuf to trigger the bitstream
2548 * refill. Check if we have data to refill the bitstream now.
2549 */
2550 mutex_lock(&ctx->bitstream_mutex);
2551 coda_fill_bitstream(ctx, NULL);
2552 mutex_unlock(&ctx->bitstream_mutex);
2553 }
2555 static void coda_decode_timeout(struct coda_ctx *ctx)
2556 {
2557 struct vb2_v4l2_buffer *dst_buf;
2559 /*
2560 * For now this only handles the case where we would deadlock with
2561 * userspace, i.e. userspace issued DEC_CMD_STOP and waits for EOS,
2562 * but after a failed decode run we would hold the context and wait for
2563 * userspace to queue more buffers.
2564 */
2565 if (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))
2566 return;
2568 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2569 dst_buf->sequence = ctx->qsequence - 1;
2571 coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
2572 }
2574 const struct coda_context_ops coda_bit_decode_ops = {
2575 .queue_init = coda_decoder_queue_init,
2576 .reqbufs = coda_decoder_reqbufs,
2577 .start_streaming = coda_start_decoding,
2578 .prepare_run = coda_prepare_decode,
2579 .finish_run = coda_finish_decode,
2580 .run_timeout = coda_decode_timeout,
2581 .seq_init_work = coda_dec_seq_init_work,
2582 .seq_end_work = coda_seq_end_work,
2583 .release = coda_bit_release,
2584 };
2586 irqreturn_t coda_irq_handler(int irq, void *data)
2587 {
2588 struct coda_dev *dev = data;
2589 struct coda_ctx *ctx;
2591 /* read status register to attend the IRQ */
2592 coda_read(dev, CODA_REG_BIT_INT_STATUS);
2593 coda_write(dev, 0, CODA_REG_BIT_INT_REASON);
2594 coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
2595 CODA_REG_BIT_INT_CLEAR);
2597 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
2598 if (ctx == NULL) {
2599 v4l2_err(&dev->v4l2_dev,
2600 "Instance released before the end of transaction\n");
2601 return IRQ_HANDLED;
2602 }
2604 trace_coda_bit_done(ctx);
2606 if (ctx->aborting) {
2607 coda_dbg(1, ctx, "task has been aborted\n");
2608 }
2610 if (coda_isbusy(ctx->dev)) {
2611 coda_dbg(1, ctx, "coda is still busy!!!!\n");
2612 return IRQ_NONE;
2613 }
2615 complete(&ctx->completion);
2617 return IRQ_HANDLED;
2618 }