/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"
/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame.
 *
 * FIXME: at this point there is no attempt to deal with visible seams
 * at the tile boundaries when upscaling. The seams are caused by a reset
 * of the bilinear upscale interpolation when starting a new tile. The
 * seams are barely visible for small upscale factors, but become
 * increasingly visible as the upscale factor gets larger, since more
 * interpolated pixels get thrown out at the tile boundaries. A possible
 * fix might be to overlap tiles of different sizes, but this must be done
 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
 * alignment restrictions of each tile.
 */
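
/*
 * Worked example (illustration only, not part of the original source):
 * a 1920x1080 output exceeds the 1024 resizer limit in both dimensions,
 * so each dimension is split into two stripes, giving a 2x2 grid of four
 * 960x540 tiles, and four tile conversions are carried out back-to-back.
 */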
#define MAX_STRIPES_W    4
#define MAX_STRIPES_H    4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W     16
#define MIN_H     8
#define MAX_W     4096
#define MAX_H     4096
enum ipu_image_convert_type {
        IMAGE_CONVERT_IN = 0,
        IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
        void          *virt;
        dma_addr_t    phys;
        unsigned long len;
};

struct ipu_image_convert_dma_chan {
        int in;
        int out;
        int rot_in;
        int rot_out;
        int vdi_in_p;
        int vdi_in;
        int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
        u32 width;
        u32 height;
        /* size and strides are in bytes */
        u32 size;
        u32 stride;
        u32 rot_stride;
        /* start Y or packed offset of this tile */
        u32 offset;
        /* offset from start to tile in U plane, for planar formats */
        u32 u_off;
        /* offset from start to tile in V plane, for planar formats */
        u32 v_off;
};
struct ipu_image_convert_image {
        struct ipu_image base;
        enum ipu_image_convert_type type;

        const struct ipu_image_pixfmt *fmt;
        unsigned int stride;

        /* # of rows (horizontal stripes) if dest height is > 1024 */
        unsigned int num_rows;
        /* # of columns (vertical stripes) if dest width is > 1024 */
        unsigned int num_cols;

        struct ipu_image_tile tile[MAX_TILES];
};
struct ipu_image_pixfmt {
        u32     fourcc;        /* V4L2 fourcc */
        int     bpp;           /* total bpp */
        int     uv_width_dec;  /* decimation in width for U/V planes */
        int     uv_height_dec; /* decimation in height for U/V planes */
        bool    planar;        /* planar format */
        bool    uv_swapped;    /* U and V planes are swapped */
        bool    uv_packed;     /* partial planar (U and V in same plane) */
};
struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;
struct ipu_image_convert_ctx {
        struct ipu_image_convert_chan *chan;

        ipu_image_convert_cb_t complete;
        void *complete_context;

        /* Source/destination image data and rotation mode */
        struct ipu_image_convert_image in;
        struct ipu_image_convert_image out;
        enum ipu_rotate_mode rot_mode;

        /* intermediate buffer for rotation */
        struct ipu_image_convert_dma_buf rot_intermediate[2];

        /* current buffer number for double buffering */
        int cur_buf_num;

        bool aborting;
        struct completion aborted;

        /* can we use double-buffering for this conversion operation? */
        bool double_buffering;
        /* num_rows * num_cols */
        unsigned int num_tiles;
        /* next tile to process */
        unsigned int next_tile;
        /* where to place converted tile in dest image */
        unsigned int out_tile_map[MAX_TILES];

        struct list_head list;
};
struct ipu_image_convert_chan {
        struct ipu_image_convert_priv *priv;

        enum ipu_ic_task ic_task;
        const struct ipu_image_convert_dma_chan *dma_ch;

        struct ipu_ic *ic;
        struct ipuv3_channel *in_chan;
        struct ipuv3_channel *out_chan;
        struct ipuv3_channel *rotation_in_chan;
        struct ipuv3_channel *rotation_out_chan;

        /* the IPU end-of-frame irqs */
        int out_eof_irq;
        int rot_out_eof_irq;

        spinlock_t irqlock;

        /* list of convert contexts */
        struct list_head ctx_list;
        /* queue of conversion runs */
        struct list_head pending_q;
        /* queue of completed runs */
        struct list_head done_q;

        /* the current conversion run */
        struct ipu_image_convert_run *current_run;
};
struct ipu_image_convert_priv {
        struct ipu_image_convert_chan chan[IC_NUM_TASKS];
        struct ipu_soc *ipu;
};
static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
        [IC_TASK_VIEWFINDER] = {
                .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
                .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
                .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
                .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
                .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
                .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
        },
        [IC_TASK_POST_PROCESSOR] = {
                .in = IPUV3_CHANNEL_MEM_IC_PP,
                .out = IPUV3_CHANNEL_IC_PP_MEM,
                .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
                .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
        },
};
static const struct ipu_image_pixfmt image_convert_formats[] = {
        {
                .fourcc = V4L2_PIX_FMT_RGB565,
                .bpp    = 16,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB24,
                .bpp    = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR24,
                .bpp    = 24,
        }, {
                .fourcc = V4L2_PIX_FMT_RGB32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_BGR32,
                .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_YUYV,
                .bpp    = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_UYVY,
                .bpp    = 16,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV420,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
        }, {
                .fourcc = V4L2_PIX_FMT_YVU420,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_swapped = true,
        }, {
                .fourcc = V4L2_PIX_FMT_NV12,
                .bpp    = 12,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 2,
                .uv_packed = true,
        }, {
                .fourcc = V4L2_PIX_FMT_YUV422P,
                .bpp    = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
        }, {
                .fourcc = V4L2_PIX_FMT_NV16,
                .bpp    = 16,
                .planar = true,
                .uv_width_dec = 2,
                .uv_height_dec = 1,
                .uv_packed = true,
        },
};
static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
        const struct ipu_image_pixfmt *ret = NULL;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
                if (image_convert_formats[i].fourcc == fourcc) {
                        ret = &image_convert_formats[i];
                        break;
                }
        }

        return ret;
}
static void dump_format(struct ipu_image_convert_ctx *ctx,
                        struct ipu_image_convert_image *ic_image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;

        dev_dbg(priv->ipu->dev,
                "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
                chan->ic_task, ctx,
                ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
                ic_image->base.pix.width, ic_image->base.pix.height,
                ic_image->num_cols, ic_image->num_rows,
                ic_image->tile[0].width, ic_image->tile[0].height,
                ic_image->fmt->fourcc & 0xff,
                (ic_image->fmt->fourcc >> 8) & 0xff,
                (ic_image->fmt->fourcc >> 16) & 0xff,
                (ic_image->fmt->fourcc >> 24) & 0xff);
}
int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
        const struct ipu_image_pixfmt *fmt;

        if (index >= (int)ARRAY_SIZE(image_convert_formats))
                return -EINVAL;

        /* Format found */
        fmt = &image_convert_formats[index];
        *fourcc = fmt->fourcc;
        return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
static void free_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf)
{
        if (buf->virt)
                dma_free_coherent(priv->ipu->dev,
                                  buf->len, buf->virt, buf->phys);
        buf->virt = NULL;
        buf->phys = 0;
}
static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
                         struct ipu_image_convert_dma_buf *buf,
                         int size)
{
        buf->len = PAGE_ALIGN(size);
        buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
                                       GFP_DMA | GFP_KERNEL);
        if (!buf->virt) {
                dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
                return -ENOMEM;
        }

        return 0;
}
static inline int num_stripes(int dim)
{
        if (dim <= 1024)
                return 1;
        else if (dim <= 2048)
                return 2;
        else
                return 4;
}
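
/*
 * Example (illustration only, not part of the original source): a
 * 640-pixel dimension fits in a single stripe, 1280 is split into two
 * stripes of 640, and 4096 is split into four stripes of 1024.
 */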
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
                                 struct ipu_image_convert_image *image)
{
        int i;

        for (i = 0; i < ctx->num_tiles; i++) {
                struct ipu_image_tile *tile = &image->tile[i];

                tile->height = image->base.pix.height / image->num_rows;
                tile->width = image->base.pix.width / image->num_cols;
                tile->size = ((tile->height * image->fmt->bpp) >> 3) *
                        tile->width;

                if (image->fmt->planar) {
                        tile->stride = tile->width;
                        tile->rot_stride = tile->height;
                } else {
                        tile->stride =
                                (image->fmt->bpp * tile->width) >> 3;
                        tile->rot_stride =
                                (image->fmt->bpp * tile->height) >> 3;
                }
        }
}
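
/*
 * Worked example (illustration only, not part of the original source):
 * a 1920x1080 RGB565 (16 bpp) frame split 2x2 gives 960x540 tiles, each
 * of size (540 * 16 / 8) * 960 = 1036800 bytes, with a packed tile
 * stride of 1920 bytes and a rotated stride of 1080 bytes.
 */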
/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
                                int src_row, int src_col)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        int dst_row, dst_col;

        /* with no rotation it's a 1:1 mapping */
        if (ctx->rot_mode == IPU_ROTATE_NONE)
                return src_row * s_image->num_cols + src_col;

        /*
         * before doing the transform, first we have to translate
         * source row,col for an origin in the center of s_image
         */
        src_row = src_row * 2 - (s_image->num_rows - 1);
        src_col = src_col * 2 - (s_image->num_cols - 1);

        /* do the rotation transform */
        if (ctx->rot_mode & IPU_ROT_BIT_90) {
                dst_col = -src_row;
                dst_row = src_col;
        } else {
                dst_col = src_col;
                dst_row = src_row;
        }

        /* apply flip */
        if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
                dst_col = -dst_col;
        if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
                dst_row = -dst_row;

        dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
                chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

        /*
         * finally translate dest row,col using an origin in upper
         * left of d_image
         */
        dst_row += d_image->num_rows - 1;
        dst_col += d_image->num_cols - 1;
        dst_row /= 2;
        dst_col /= 2;

        return dst_row * d_image->num_cols + dst_col;
}
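
/*
 * Worked example (illustration only, not part of the original source):
 * for a 90 degree clockwise rotation of a 2x2 grid, source tile
 * [row 0, col 0] becomes centered coords (-1,-1), the 90 degree step
 * gives (dst_row, dst_col) = (-1, 1), and translating back yields
 * destination tile [0,1]; the top-left source tile lands top-right,
 * as expected for a clockwise rotation.
 */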
/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_image *s_image = &ctx->in;
        unsigned int row, col, tile = 0;

        for (row = 0; row < s_image->num_rows; row++) {
                for (col = 0; col < s_image->num_cols; col++) {
                        ctx->out_tile_map[tile] =
                                transform_tile_index(ctx, row, col);
                        tile++;
                }
        }
}
static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
                                     struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 H, w, h, y_stride, uv_stride;
        u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
        u32 y_row_off, y_col_off, y_off;
        u32 y_size, uv_size;

        /* setup some convenience vars */
        H = image->base.pix.height;

        y_stride = image->stride;
        uv_stride = y_stride / fmt->uv_width_dec;
        if (fmt->uv_packed)
                uv_stride *= 2;

        y_size = H * y_stride;
        uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

        for (row = 0; row < image->num_rows; row++) {
                w = image->tile[tile].width;
                h = image->tile[tile].height;
                y_row_off = row * h * y_stride;
                uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;

                for (col = 0; col < image->num_cols; col++) {
                        y_col_off = col * w;
                        uv_col_off = y_col_off / fmt->uv_width_dec;
                        if (fmt->uv_packed)
                                uv_col_off *= 2;

                        y_off = y_row_off + y_col_off;
                        uv_off = uv_row_off + uv_col_off;

                        u_off = y_size - y_off + uv_off;
                        v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
                        if (fmt->uv_swapped) {
                                tmp = u_off;
                                u_off = v_off;
                                v_off = tmp;
                        }

                        image->tile[tile].offset = y_off;
                        image->tile[tile].u_off = u_off;
                        image->tile[tile++].v_off = v_off;

                        dev_dbg(priv->ipu->dev,
                                "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
                                chan->ic_task, ctx,
                                image->type == IMAGE_CONVERT_IN ?
                                "Input" : "Output", row, col,
                                y_off, u_off, v_off);
                }
        }
}
static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
                                     struct ipu_image_convert_image *image)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        const struct ipu_image_pixfmt *fmt = image->fmt;
        unsigned int row, col, tile = 0;
        u32 w, h, bpp, stride;
        u32 row_off, col_off;

        /* setup some convenience vars */
        stride = image->stride;
        bpp = fmt->bpp;

        for (row = 0; row < image->num_rows; row++) {
                w = image->tile[tile].width;
                h = image->tile[tile].height;
                row_off = row * h * stride;

                for (col = 0; col < image->num_cols; col++) {
                        col_off = (col * w * bpp) >> 3;

                        image->tile[tile].offset = row_off + col_off;
                        image->tile[tile].u_off = 0;
                        image->tile[tile++].v_off = 0;

                        dev_dbg(priv->ipu->dev,
                                "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
                                chan->ic_task, ctx,
                                image->type == IMAGE_CONVERT_IN ?
                                "Input" : "Output", row, col,
                                row_off + col_off);
                }
        }
}
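
/*
 * Worked example (illustration only, not part of the original source):
 * for a 1920x1080 RGB565 frame (stride 3840 bytes) split 2x2, the tile
 * at [row 1, col 1] starts at byte offset
 * 1 * 540 * 3840 + (1 * 960 * 16) / 8 = 2073600 + 1920.
 */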
static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
                              struct ipu_image_convert_image *image)
{
        if (image->fmt->planar)
                calc_tile_offsets_planar(ctx, image);
        else
                calc_tile_offsets_packed(ctx, image);
}
/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
                         struct list_head *q)
{
        struct ipu_image_convert_run *run;
        int count = 0;

        lockdep_assert_held(&ctx->chan->irqlock);

        list_for_each_entry(run, q, list) {
                if (run->ctx == ctx)
                        count++;
        }

        return count;
}
static void convert_stop(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;

        dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
                __func__, chan->ic_task, ctx, run);

        /* disable IC tasks and the channels */
        ipu_ic_task_disable(chan->ic);
        ipu_idmac_disable_channel(chan->in_chan);
        ipu_idmac_disable_channel(chan->out_chan);

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                ipu_idmac_disable_channel(chan->rotation_in_chan);
                ipu_idmac_disable_channel(chan->rotation_out_chan);
                ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
        }

        ipu_ic_disable(chan->ic);
}
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
                               struct ipuv3_channel *channel,
                               struct ipu_image_convert_image *image,
                               enum ipu_rotate_mode rot_mode,
                               bool rot_swap_width_height)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        unsigned int burst_size;
        u32 width, height, stride;
        dma_addr_t addr0, addr1 = 0;
        struct ipu_image tile_image;
        unsigned int tile_idx[2];

        if (image->type == IMAGE_CONVERT_OUT) {
                tile_idx[0] = ctx->out_tile_map[0];
                tile_idx[1] = ctx->out_tile_map[1];
        } else {
                tile_idx[0] = 0;
                tile_idx[1] = 1;
        }

        if (rot_swap_width_height) {
                width = image->tile[0].height;
                height = image->tile[0].width;
                stride = image->tile[0].rot_stride;
                addr0 = ctx->rot_intermediate[0].phys;
                if (ctx->double_buffering)
                        addr1 = ctx->rot_intermediate[1].phys;
        } else {
                width = image->tile[0].width;
                height = image->tile[0].height;
                stride = image->stride;
                addr0 = image->base.phys0 +
                        image->tile[tile_idx[0]].offset;
                if (ctx->double_buffering)
                        addr1 = image->base.phys0 +
                                image->tile[tile_idx[1]].offset;
        }

        ipu_cpmem_zero(channel);

        memset(&tile_image, 0, sizeof(tile_image));
        tile_image.pix.width = tile_image.rect.width = width;
        tile_image.pix.height = tile_image.rect.height = height;
        tile_image.pix.bytesperline = stride;
        tile_image.pix.pixelformat = image->fmt->fourcc;
        tile_image.phys0 = addr0;
        tile_image.phys1 = addr1;
        ipu_cpmem_set_image(channel, &tile_image);

        if (image->fmt->planar && !rot_swap_width_height)
                ipu_cpmem_set_uv_offset(channel,
                                        image->tile[tile_idx[0]].u_off,
                                        image->tile[tile_idx[0]].v_off);

        if (rot_mode)
                ipu_cpmem_set_rotation(channel, rot_mode);

        if (channel == chan->rotation_in_chan ||
            channel == chan->rotation_out_chan) {
                burst_size = 8;
                ipu_cpmem_set_block_mode(channel);
        } else
                burst_size = (width % 16) ? 8 : 16;

        ipu_cpmem_set_burstsize(channel, burst_size);

        ipu_ic_task_idma_init(chan->ic, channel, width, height,
                              burst_size, rot_mode);

        /*
         * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
         * only do this when there is no PRG present.
         */
        if (!channel->ipu->prg_priv)
                ipu_cpmem_set_axi_id(channel, 1);

        ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
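
/*
 * Example (illustration only, not part of the original source): a
 * 960-pixel-wide tile is a multiple of 16, so the non-rotation channels
 * use a 16-pixel burst, while a 1000-pixel-wide tile falls back to a
 * burst of 8; the IRT channels always use a burst of 8 with block mode.
 */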
static int convert_start(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        enum ipu_color_space src_cs, dest_cs;
        unsigned int dest_width, dest_height;
        int ret;

        dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
                __func__, chan->ic_task, ctx, run);

        src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
        dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* swap width/height for resizer */
                dest_width = d_image->tile[0].height;
                dest_height = d_image->tile[0].width;
        } else {
                dest_width = d_image->tile[0].width;
                dest_height = d_image->tile[0].height;
        }

        /* setup the IC resizer and CSC */
        ret = ipu_ic_task_init(chan->ic,
                               s_image->tile[0].width,
                               s_image->tile[0].height,
                               dest_width,
                               dest_height,
                               src_cs, dest_cs);
        if (ret) {
                dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
                return ret;
        }

        /* init the source MEM-->IC PP IDMAC channel */
        init_idmac_channel(ctx, chan->in_chan, s_image,
                           IPU_ROTATE_NONE, false);

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* init the IC PP-->MEM IDMAC channel */
                init_idmac_channel(ctx, chan->out_chan, d_image,
                                   IPU_ROTATE_NONE, true);

                /* init the MEM-->IC PP ROT IDMAC channel */
                init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
                                   ctx->rot_mode, true);

                /* init the destination IC PP ROT-->MEM IDMAC channel */
                init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
                                   IPU_ROTATE_NONE, false);

                /* now link IC PP-->MEM to MEM-->IC PP ROT */
                ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
        } else {
                /* init the destination IC PP-->MEM IDMAC channel */
                init_idmac_channel(ctx, chan->out_chan, d_image,
                                   ctx->rot_mode, false);
        }

        /* enable the IC */
        ipu_ic_enable(chan->ic);

        /* set buffers ready */
        ipu_idmac_select_buffer(chan->in_chan, 0);
        ipu_idmac_select_buffer(chan->out_chan, 0);
        if (ipu_rot_mode_is_irt(ctx->rot_mode))
                ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
        if (ctx->double_buffering) {
                ipu_idmac_select_buffer(chan->in_chan, 1);
                ipu_idmac_select_buffer(chan->out_chan, 1);
                if (ipu_rot_mode_is_irt(ctx->rot_mode))
                        ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
        }

        /* enable the channels! */
        ipu_idmac_enable_channel(chan->in_chan);
        ipu_idmac_enable_channel(chan->out_chan);
        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                ipu_idmac_enable_channel(chan->rotation_in_chan);
                ipu_idmac_enable_channel(chan->rotation_out_chan);
        }

        ipu_ic_task_enable(chan->ic);

        ipu_cpmem_dump(chan->in_chan);
        ipu_cpmem_dump(chan->out_chan);
        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                ipu_cpmem_dump(chan->rotation_in_chan);
                ipu_cpmem_dump(chan->rotation_out_chan);
        }

        return 0;
}
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;

        lockdep_assert_held(&chan->irqlock);

        ctx->in.base.phys0 = run->in_phys;
        ctx->out.base.phys0 = run->out_phys;

        ctx->cur_buf_num = 0;
        ctx->next_tile = 1;

        /* remove run from pending_q and set as current */
        list_del(&run->list);
        chan->current_run = run;

        return convert_start(run);
}
/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_run *run, *tmp;
        int ret;

        lockdep_assert_held(&chan->irqlock);

        list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
                /* skip contexts that are aborting */
                if (run->ctx->aborting) {
                        dev_dbg(priv->ipu->dev,
                                "%s: task %u: skipping aborting ctx %p run %p\n",
                                __func__, chan->ic_task, run->ctx, run);
                        continue;
                }

                ret = do_run(run);
                if (!ret)
                        break;

                /*
                 * something went wrong with start, add the run
                 * to done q and continue to the next run in the
                 * pending q.
                 */
                run->status = ret;
                list_add_tail(&run->list, &chan->done_q);
                chan->current_run = NULL;
        }
}
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_run *run;
        unsigned long flags;

        spin_lock_irqsave(&chan->irqlock, flags);

        while (!list_empty(&chan->done_q)) {
                run = list_entry(chan->done_q.next,
                                 struct ipu_image_convert_run,
                                 list);

                list_del(&run->list);

                dev_dbg(priv->ipu->dev,
                        "%s: task %u: completing ctx %p run %p with %d\n",
                        __func__, chan->ic_task, run->ctx, run, run->status);

                /* call the completion callback and free the run */
                spin_unlock_irqrestore(&chan->irqlock, flags);
                run->ctx->complete(run, run->ctx->complete_context);
                spin_lock_irqsave(&chan->irqlock, flags);
        }

        spin_unlock_irqrestore(&chan->irqlock, flags);
}
/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
        struct ipu_image_convert_chan *chan = dev_id;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_ctx *ctx;
        unsigned long flags;

        dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
                chan->ic_task);

        empty_done_q(chan);

        spin_lock_irqsave(&chan->irqlock, flags);

        /*
         * the done_q is cleared out, signal any contexts
         * that are aborting that abort can complete.
         */
        list_for_each_entry(ctx, &chan->ctx_list, list) {
                if (ctx->aborting) {
                        dev_dbg(priv->ipu->dev,
                                "%s: task %u: signaling abort for ctx %p\n",
                                __func__, chan->ic_task, ctx);
                        complete(&ctx->aborted);
                }
        }

        spin_unlock_irqrestore(&chan->irqlock, flags);

        dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
                chan->ic_task);

        return IRQ_HANDLED;
}
/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_ctx *ctx = run->ctx;
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_tile *src_tile, *dst_tile;
        struct ipu_image_convert_image *s_image = &ctx->in;
        struct ipu_image_convert_image *d_image = &ctx->out;
        struct ipuv3_channel *outch;
        unsigned int dst_idx;

        lockdep_assert_held(&chan->irqlock);

        outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
                chan->rotation_out_chan : chan->out_chan;

        /*
         * It is difficult to stop the channel DMA before the channels
         * enter the paused state. Without double-buffering the channels
         * are always in a paused state when the EOF irq occurs, so it
         * is safe to stop the channels now. For double-buffering we
         * just ignore the abort until the operation completes, when it
         * is safe to shut down.
         */
        if (ctx->aborting && !ctx->double_buffering) {
                convert_stop(run);
                run->status = -EIO;
                goto done;
        }

        if (ctx->next_tile == ctx->num_tiles) {
                /*
                 * the conversion is complete
                 */
                convert_stop(run);
                run->status = 0;
                goto done;
        }

        /*
         * not done, place the next tile buffers.
         */
        if (!ctx->double_buffering) {

                src_tile = &s_image->tile[ctx->next_tile];
                dst_idx = ctx->out_tile_map[ctx->next_tile];
                dst_tile = &d_image->tile[dst_idx];

                ipu_cpmem_set_buffer(chan->in_chan, 0,
                                     s_image->base.phys0 + src_tile->offset);
                ipu_cpmem_set_buffer(outch, 0,
                                     d_image->base.phys0 + dst_tile->offset);
                if (s_image->fmt->planar)
                        ipu_cpmem_set_uv_offset(chan->in_chan,
                                                src_tile->u_off,
                                                src_tile->v_off);
                if (d_image->fmt->planar)
                        ipu_cpmem_set_uv_offset(outch,
                                                dst_tile->u_off,
                                                dst_tile->v_off);

                ipu_idmac_select_buffer(chan->in_chan, 0);
                ipu_idmac_select_buffer(outch, 0);

        } else if (ctx->next_tile < ctx->num_tiles - 1) {

                src_tile = &s_image->tile[ctx->next_tile + 1];
                dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
                dst_tile = &d_image->tile[dst_idx];

                ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
                                     s_image->base.phys0 + src_tile->offset);
                ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
                                     d_image->base.phys0 + dst_tile->offset);

                ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
                ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

                ctx->cur_buf_num ^= 1;
        }

        ctx->next_tile++;
        return IRQ_HANDLED;
done:
        list_add_tail(&run->list, &chan->done_q);
        chan->current_run = NULL;
        run_next(chan);
        return IRQ_WAKE_THREAD;
}
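
/*
 * Illustration (not part of the original source): with double-buffering
 * and four tiles, when the EOF for tile N arrives, tile N + 1 is already
 * converting from the other IDMAC buffer and the handler above reloads
 * the just-freed buffer (N & 1) with tile N + 2, so the channels never
 * have to stop between tiles.
 */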
static irqreturn_t norotate_irq(int irq, void *data)
{
        struct ipu_image_convert_chan *chan = data;
        struct ipu_image_convert_ctx *ctx;
        struct ipu_image_convert_run *run;
        unsigned long flags;
        irqreturn_t ret;

        spin_lock_irqsave(&chan->irqlock, flags);

        /* get current run and its context */
        run = chan->current_run;
        if (!run) {
                ret = IRQ_NONE;
                goto out;
        }

        ctx = run->ctx;

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* this is a rotation operation, just ignore */
                spin_unlock_irqrestore(&chan->irqlock, flags);
                return IRQ_HANDLED;
        }

        ret = do_irq(run);
out:
        spin_unlock_irqrestore(&chan->irqlock, flags);
        return ret;
}
static irqreturn_t rotate_irq(int irq, void *data)
{
        struct ipu_image_convert_chan *chan = data;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_ctx *ctx;
        struct ipu_image_convert_run *run;
        unsigned long flags;
        irqreturn_t ret;

        spin_lock_irqsave(&chan->irqlock, flags);

        /* get current run and its context */
        run = chan->current_run;
        if (!run) {
                ret = IRQ_NONE;
                goto out;
        }

        ctx = run->ctx;

        if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
                /* this was NOT a rotation operation, shouldn't happen */
                dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
                spin_unlock_irqrestore(&chan->irqlock, flags);
                return IRQ_HANDLED;
        }

        ret = do_irq(run);
out:
        spin_unlock_irqrestore(&chan->irqlock, flags);
        return ret;
}
/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_run *run;
        unsigned long flags;

        spin_lock_irqsave(&chan->irqlock, flags);

        run = chan->current_run;
        if (run && run->ctx == ctx) {
                convert_stop(run);
                run->status = -EIO;
                list_add_tail(&run->list, &chan->done_q);
                chan->current_run = NULL;
                run_next(chan);
        }

        spin_unlock_irqrestore(&chan->irqlock, flags);

        empty_done_q(chan);
}
static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
        if (chan->out_eof_irq >= 0)
                free_irq(chan->out_eof_irq, chan);
        if (chan->rot_out_eof_irq >= 0)
                free_irq(chan->rot_out_eof_irq, chan);

        if (!IS_ERR_OR_NULL(chan->in_chan))
                ipu_idmac_put(chan->in_chan);
        if (!IS_ERR_OR_NULL(chan->out_chan))
                ipu_idmac_put(chan->out_chan);
        if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
                ipu_idmac_put(chan->rotation_in_chan);
        if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
                ipu_idmac_put(chan->rotation_out_chan);
        if (!IS_ERR_OR_NULL(chan->ic))
                ipu_ic_put(chan->ic);

        chan->in_chan = chan->out_chan = chan->rotation_in_chan =
                chan->rotation_out_chan = NULL;
        chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
        const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
        struct ipu_image_convert_priv *priv = chan->priv;
        int ret;

        /* get the IC */
        chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
        if (IS_ERR(chan->ic)) {
                dev_err(priv->ipu->dev, "could not acquire IC\n");
                ret = PTR_ERR(chan->ic);
                goto err;
        }

        /* get IDMAC channels */
        chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
        chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
        if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
                dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
                ret = -EBUSY;
                goto err;
        }

        chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
        chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
        if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
                dev_err(priv->ipu->dev,
                        "could not acquire idmac rotation channels\n");
                ret = -EBUSY;
                goto err;
        }

        /* acquire the EOF interrupts */
        chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
                                                  chan->out_chan,
                                                  IPU_IRQ_EOF);

        ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
                                   0, "ipu-ic", chan);
        if (ret < 0) {
                dev_err(priv->ipu->dev, "could not acquire irq %d\n",
                        chan->out_eof_irq);
                chan->out_eof_irq = -1;
                goto err;
        }

        chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
                                                      chan->rotation_out_chan,
                                                      IPU_IRQ_EOF);

        ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
                                   0, "ipu-ic", chan);
        if (ret < 0) {
                dev_err(priv->ipu->dev, "could not acquire irq %d\n",
                        chan->rot_out_eof_irq);
                chan->rot_out_eof_irq = -1;
                goto err;
        }

        return 0;
err:
        release_ipu_resources(chan);
        return ret;
}
*ctx
,
1174 struct ipu_image_convert_image
*ic_image
,
1175 struct ipu_image
*image
,
1176 enum ipu_image_convert_type type
)
1178 struct ipu_image_convert_priv
*priv
= ctx
->chan
->priv
;
1180 ic_image
->base
= *image
;
1181 ic_image
->type
= type
;
1183 ic_image
->fmt
= get_format(image
->pix
.pixelformat
);
1184 if (!ic_image
->fmt
) {
1185 dev_err(priv
->ipu
->dev
, "pixelformat not supported for %s\n",
1186 type
== IMAGE_CONVERT_OUT
? "Output" : "Input");
1190 if (ic_image
->fmt
->planar
)
1191 ic_image
->stride
= ic_image
->base
.pix
.width
;
1193 ic_image
->stride
= ic_image
->base
.pix
.bytesperline
;
1195 calc_tile_dimensions(ctx
, ic_image
);
1196 calc_tile_offsets(ctx
, ic_image
);
/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);

        /* Clamp to aligned min and max */
        x = clamp(x, (min + ~mask) & mask, max & mask);

        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        return x;
}
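
/*
 * Worked example (illustration only, not part of the original source):
 * clamp_align(1283, MIN_W, MAX_W, 7) uses mask ~127, clamps the value
 * into [128, 4096] and rounds to the nearest multiple of 128, giving
 * 1280.
 */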
/*
 * We have to adjust the tile width such that the tile physaddrs and
 * U and V plane offsets are multiples of 8 bytes as required by
 * the IPU DMA Controller. For the planar formats, this corresponds
 * to a pixel alignment of 16 (but use a more formal equation since
 * the variables are available). For all the packed formats, 8 is
 * good enough.
 */
static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
{
        return fmt->planar ? 8 * fmt->uv_width_dec : 8;
}

/*
 * For tile height alignment, we have to ensure that the output tile
 * heights are multiples of 8 lines if the IRT is required by the
 * given rotation mode (the IRT performs rotations on 8x8 blocks
 * at a time). If the IRT is not used, or for input image tiles,
 * 2 lines are good enough.
 */
static inline u32 tile_height_align(enum ipu_image_convert_type type,
                                    enum ipu_rotate_mode rot_mode)
{
        return (type == IMAGE_CONVERT_OUT &&
                ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
}
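
/*
 * Example (illustration only, not part of the original source): YUV420
 * has uv_width_dec = 2, so its tiles must be a multiple of 16 pixels
 * wide, while packed RGB tiles only need multiples of 8. Output tiles
 * that pass through the IRT must be a multiple of 8 lines high,
 * otherwise 2 lines is enough.
 */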
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
                              enum ipu_rotate_mode rot_mode)
{
        const struct ipu_image_pixfmt *infmt, *outfmt;
        unsigned int num_in_rows, num_in_cols;
        unsigned int num_out_rows, num_out_cols;
        u32 w_align, h_align;

        infmt = get_format(in->pix.pixelformat);
        outfmt = get_format(out->pix.pixelformat);

        /* set some default pixel formats if needed */
        if (!infmt) {
                in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
                infmt = get_format(V4L2_PIX_FMT_RGB24);
        }
        if (!outfmt) {
                out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
                outfmt = get_format(V4L2_PIX_FMT_RGB24);
        }

        /* image converter does not handle fields */
        in->pix.field = out->pix.field = V4L2_FIELD_NONE;

        /* resizer cannot downsize more than 4:1 */
        if (ipu_rot_mode_is_irt(rot_mode)) {
                out->pix.height = max_t(__u32, out->pix.height,
                                        in->pix.width / 4);
                out->pix.width = max_t(__u32, out->pix.width,
                                       in->pix.height / 4);
        } else {
                out->pix.width = max_t(__u32, out->pix.width,
                                       in->pix.width / 4);
                out->pix.height = max_t(__u32, out->pix.height,
                                        in->pix.height / 4);
        }

        /* get tiling rows/cols from output format */
        num_out_rows = num_stripes(out->pix.height);
        num_out_cols = num_stripes(out->pix.width);
        if (ipu_rot_mode_is_irt(rot_mode)) {
                num_in_rows = num_out_cols;
                num_in_cols = num_out_rows;
        } else {
                num_in_rows = num_out_rows;
                num_in_cols = num_out_cols;
        }

        /* align input width/height */
        w_align = ilog2(tile_width_align(infmt) * num_in_cols);
        h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
                        num_in_rows);
        in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
        in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);

        /* align output width/height */
        w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
        h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
                        num_out_rows);
        out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
        out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);

        /* set input/output strides and image sizes */
        in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
        in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
        out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
        out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
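
/*
 * Worked example (illustration only, not part of the original source):
 * a 2x2-tiled 1920x1080 YUV420 output with no rotation needs
 * w_align = ilog2(16 * 2) = 5 (width a multiple of 32) and
 * h_align = ilog2(2 * 2) = 2 (height a multiple of 4), both of which
 * 1920x1080 already satisfies, so the dimensions are left unchanged.
 */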
/*
 * this is used by ipu_image_convert_prepare() to verify that the set
 * input and output images are valid before starting the conversion.
 * Clients can also call it before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
                             enum ipu_rotate_mode rot_mode)
{
        struct ipu_image testin, testout;

        testin = *in;
        testout = *out;

        ipu_image_convert_adjust(&testin, &testout, rot_mode);

        if (testin.pix.width != in->pix.width ||
            testin.pix.height != in->pix.height ||
            testout.pix.width != out->pix.width ||
            testout.pix.height != out->pix.height)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
                          struct ipu_image *in, struct ipu_image *out,
                          enum ipu_rotate_mode rot_mode,
                          ipu_image_convert_cb_t complete,
                          void *complete_context)
{
        struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
        struct ipu_image_convert_image *s_image, *d_image;
        struct ipu_image_convert_chan *chan;
        struct ipu_image_convert_ctx *ctx;
        unsigned long flags;
        bool get_res;
        int ret;

        if (!in || !out || !complete ||
            (ic_task != IC_TASK_VIEWFINDER &&
             ic_task != IC_TASK_POST_PROCESSOR))
                return ERR_PTR(-EINVAL);

        /* verify the in/out images before continuing */
        ret = ipu_image_convert_verify(in, out, rot_mode);
        if (ret) {
                dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
                        __func__);
                return ERR_PTR(ret);
        }

        chan = &priv->chan[ic_task];

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
                chan->ic_task, ctx);

        ctx->chan = chan;
        init_completion(&ctx->aborted);

        s_image = &ctx->in;
        d_image = &ctx->out;

        /* set tiling and rotation */
        d_image->num_rows = num_stripes(out->pix.height);
        d_image->num_cols = num_stripes(out->pix.width);
        if (ipu_rot_mode_is_irt(rot_mode)) {
                s_image->num_rows = d_image->num_cols;
                s_image->num_cols = d_image->num_rows;
        } else {
                s_image->num_rows = d_image->num_rows;
                s_image->num_cols = d_image->num_cols;
        }

        ctx->num_tiles = d_image->num_cols * d_image->num_rows;
        ctx->rot_mode = rot_mode;

        ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
        if (ret)
                goto out_free;
        ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
        if (ret)
                goto out_free;

        calc_out_tile_map(ctx);

        dump_format(ctx, s_image);
        dump_format(ctx, d_image);

        ctx->complete = complete;
        ctx->complete_context = complete_context;

        /*
         * Can we use double-buffering for this operation? If there is
         * only one tile (the whole image can be converted in a single
         * operation) there's no point in using double-buffering. Also,
         * the IPU's IDMAC channels allow only a single U and V plane
         * offset shared between both buffers, but these offsets change
         * for every tile, and therefore would have to be updated for
         * each buffer which is not possible. So double-buffering is
         * impossible when either the source or destination images are
         * a planar format (YUV420, YUV422P, etc.).
         */
        ctx->double_buffering = (ctx->num_tiles > 1 &&
                                 !s_image->fmt->planar &&
                                 !d_image->fmt->planar);

        if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
                ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
                                    d_image->tile[0].size);
                if (ret)
                        goto out_free;
                if (ctx->double_buffering) {
                        ret = alloc_dma_buf(priv,
                                            &ctx->rot_intermediate[1],
                                            d_image->tile[0].size);
                        if (ret)
                                goto out_free_dmabuf0;
                }
        }

        spin_lock_irqsave(&chan->irqlock, flags);

        get_res = list_empty(&chan->ctx_list);

        list_add_tail(&ctx->list, &chan->ctx_list);

        spin_unlock_irqrestore(&chan->irqlock, flags);

        if (get_res) {
                ret = get_ipu_resources(chan);
                if (ret)
                        goto out_free_dmabuf1;
        }

        return ctx;

out_free_dmabuf1:
        free_dma_buf(priv, &ctx->rot_intermediate[1]);
        spin_lock_irqsave(&chan->irqlock, flags);
        list_del(&ctx->list);
        spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
        free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
        kfree(ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
/*
 * Carry out a single image conversion run. Only the physaddrs of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
        struct ipu_image_convert_chan *chan;
        struct ipu_image_convert_priv *priv;
        struct ipu_image_convert_ctx *ctx;
        unsigned long flags;
        int ret = 0;

        if (!run || !run->ctx || !run->in_phys || !run->out_phys)
                return -EINVAL;

        ctx = run->ctx;
        chan = ctx->chan;
        priv = chan->priv;

        dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
                chan->ic_task, ctx, run);

        INIT_LIST_HEAD(&run->list);

        spin_lock_irqsave(&chan->irqlock, flags);

        if (ctx->aborting) {
                ret = -EIO;
                goto unlock;
        }

        list_add_tail(&run->list, &chan->pending_q);

        if (!chan->current_run) {
                ret = do_run(run);
                if (ret)
                        chan->current_run = NULL;
        }
unlock:
        spin_unlock_irqrestore(&chan->irqlock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
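
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * source; the caller-side variable names are made up). A client prepares
 * a context once, queues runs against it, and unprepares when done:
 *
 *      ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *                                      &in, &out, rot_mode,
 *                                      my_done_cb, my_priv);
 *      run = kzalloc(sizeof(*run), GFP_KERNEL);
 *      run->ctx = ctx;
 *      run->in_phys = in_buffer_phys;
 *      run->out_phys = out_buffer_phys;
 *      ipu_image_convert_queue(run);
 *      ... my_done_cb() is called when the run completes ...
 *      ipu_image_convert_unprepare(ctx);
 *      kfree(run);
 */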
/* Abort any active or pending conversions for this context */
void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        struct ipu_image_convert_run *run, *active_run, *tmp;
        unsigned long flags;
        int run_count, ret;
        bool need_abort;

        reinit_completion(&ctx->aborted);

        spin_lock_irqsave(&chan->irqlock, flags);

        /* move all remaining pending runs in this context to done_q */
        list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
                if (run->ctx != ctx)
                        continue;
                run->status = -EIO;
                list_move_tail(&run->list, &chan->done_q);
        }

        run_count = get_run_count(ctx, &chan->done_q);
        active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
                chan->current_run : NULL;

        need_abort = (run_count || active_run);

        ctx->aborting = need_abort;

        spin_unlock_irqrestore(&chan->irqlock, flags);

        if (!need_abort) {
                dev_dbg(priv->ipu->dev,
                        "%s: task %u: no abort needed for ctx %p\n",
                        __func__, chan->ic_task, ctx);
                return;
        }

        dev_dbg(priv->ipu->dev,
                "%s: task %u: wait for completion: %d runs, active run %p\n",
                __func__, chan->ic_task, run_count, active_run);

        ret = wait_for_completion_timeout(&ctx->aborted,
                                          msecs_to_jiffies(10000));
        if (ret == 0) {
                dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
                force_abort(ctx);
        }

        ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
        struct ipu_image_convert_chan *chan = ctx->chan;
        struct ipu_image_convert_priv *priv = chan->priv;
        unsigned long flags;
        bool put_res;

        /* make sure no runs are hanging around */
        ipu_image_convert_abort(ctx);

        dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
                chan->ic_task, ctx);

        spin_lock_irqsave(&chan->irqlock, flags);

        list_del(&ctx->list);

        put_res = list_empty(&chan->ctx_list);

        spin_unlock_irqrestore(&chan->irqlock, flags);

        if (put_res)
                release_ipu_resources(chan);

        free_dma_buf(priv, &ctx->rot_intermediate[1]);
        free_dma_buf(priv, &ctx->rot_intermediate[0]);

        kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
                  struct ipu_image *in, struct ipu_image *out,
                  enum ipu_rotate_mode rot_mode,
                  ipu_image_convert_cb_t complete,
                  void *complete_context)
{
        struct ipu_image_convert_ctx *ctx;
        struct ipu_image_convert_run *run;
        int ret;

        ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
                                        complete, complete_context);
        if (IS_ERR(ctx))
                return ERR_CAST(ctx);

        run = kzalloc(sizeof(*run), GFP_KERNEL);
        if (!run) {
                ipu_image_convert_unprepare(ctx);
                return ERR_PTR(-ENOMEM);
        }

        run->ctx = ctx;
        run->in_phys = in->phys0;
        run->out_phys = out->phys0;

        ret = ipu_image_convert_queue(run);
        if (ret) {
                ipu_image_convert_unprepare(ctx);
                kfree(run);
                return ERR_PTR(ret);
        }

        return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);
/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
                                        void *data)
{
        struct completion *comp = data;

        complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
                           struct ipu_image *in, struct ipu_image *out,
                           enum ipu_rotate_mode rot_mode)
{
        struct ipu_image_convert_run *run;
        struct completion comp;
        int ret;

        init_completion(&comp);

        run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
                                image_convert_sync_complete, &comp);
        if (IS_ERR(run))
                return PTR_ERR(run);

        ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
        ret = (ret == 0) ? -ETIMEDOUT : 0;

        ipu_image_convert_unprepare(run->ctx);
        kfree(run);

        return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
        struct ipu_image_convert_priv *priv;
        int i;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        ipu->image_convert_priv = priv;
        priv->ipu = ipu;

        for (i = 0; i < IC_NUM_TASKS; i++) {
                struct ipu_image_convert_chan *chan = &priv->chan[i];

                chan->ic_task = i;
                chan->priv = priv;
                chan->dma_ch = &image_convert_dma_chan[i];
                chan->out_eof_irq = -1;
                chan->rot_out_eof_irq = -1;

                spin_lock_init(&chan->irqlock);
                INIT_LIST_HEAD(&chan->ctx_list);
                INIT_LIST_HEAD(&chan->pending_q);
                INIT_LIST_HEAD(&chan->done_q);
        }

        return 0;
}

void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}