/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame.
 *
 * FIXME: at this point there is no attempt to deal with visible seams
 * at the tile boundaries when upscaling. The seams are caused by a reset
 * of the bilinear upscale interpolation when starting a new tile. The
 * seams are barely visible for small upscale factors, but become
 * increasingly visible as the upscale factor gets larger, since more
 * interpolated pixels get thrown out at the tile boundaries. A possible
 * fix might be to overlap tiles of different sizes, but this must be done
 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
 * alignment restrictions of each tile.
 */
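
/*
 * Worked example (illustrative numbers): a 1920x1080 output exceeds the
 * 1024-pixel resizer limit in both dimensions, and since both are <= 2048
 * each dimension is split into 2 stripes, giving 2x2 = 4 output tiles of
 * 960x540. The input frame is split into the same number of tiles (with
 * rows and columns swapped when the IRT rotates by 90 degrees), and the
 * 4 tiles are then converted back-to-back.
 */
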
#define MAX_STRIPES_W	4
#define MAX_STRIPES_H	4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W	16
#define MIN_H	8
#define MAX_W	4096
#define MAX_H	4096

enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
	void		*virt;
	dma_addr_t	phys;
	unsigned long	len;
};

struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
	u32	fourcc;        /* V4L2 fourcc */
	int	bpp;           /* total bpp */
	int	uv_width_dec;  /* decimation in width for U/V planes */
	int	uv_height_dec; /* decimation in height for U/V planes */
	bool	planar;        /* planar format */
	bool	uv_swapped;    /* U and V planes are swapped */
	bool	uv_packed;     /* partial planar (U and V in same plane) */
};

struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	enum ipu_rotate_mode rot_mode;

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];

	struct list_head list;
};

struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};

static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XRGB32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_XBGR32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.bpp = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.bpp = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV420,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV422P,
		.bpp = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_NV16,
		.bpp = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}

static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->tile[0].width, ic_image->tile[0].height,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}

static inline int num_stripes(int dim)
{
	if (dim <= 1024)
		return 1;
	else if (dim <= 2048)
		return 2;
	else
		return 4;
}

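/*
 * For example: num_stripes(720) == 1, num_stripes(1920) == 2 and
 * num_stripes(3840) == 4, so a 3840x2160 output frame is split into
 * 4x4 stripes (16 tiles).
 */
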
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				 struct ipu_image_convert_image *image)
{
	int i;

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile = &image->tile[i];

		tile->height = image->base.pix.height / image->num_rows;
		tile->width = image->base.pix.width / image->num_cols;
		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}
	}
}

/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}

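/*
 * Worked example (illustrative): for a 2x2 tile grid and a rotation mode
 * with only IPU_ROT_BIT_90 set, source tile [row 0, col 0] is first
 * translated to the centered coordinates (-1, -1), rotated to dst_col = 1,
 * dst_row = -1, and translated back to destination tile [row 0, col 1],
 * i.e. tile index 1.
 */
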
/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}

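/*
 * For planar formats, the per-tile u_off/v_off computed below are distances
 * from the tile's own luma offset to that tile's chroma samples, which is
 * why the full luma plane size enters the calculation:
 * u_off = (y_size + uv_off) - y_off.
 */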
static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				     struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, w, h, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		w = image->tile[tile].width;
		h = image->tile[tile].height;
		y_row_off = row * h * y_stride;
		uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = col * w;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			dev_dbg(priv->ipu->dev,
				"task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
				chan->ic_task, ctx,
				image->type == IMAGE_CONVERT_IN ?
				"Input" : "Output", row, col,
				y_off, u_off, v_off);
		}
	}
}

static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				     struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 w, h, bpp, stride;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		w = image->tile[tile].width;
		h = image->tile[tile].height;
		row_off = row * h * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (col * w * bpp) >> 3;

			image->tile[tile].offset = row_off + col_off;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			dev_dbg(priv->ipu->dev,
				"task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
				chan->ic_task, ctx,
				image->type == IMAGE_CONVERT_IN ?
				"Input" : "Output", row, col,
				row_off + col_off);
		}
	}
}

static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			      struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		calc_tile_offsets_planar(ctx, image);
	else
		calc_tile_offsets_packed(ctx, image);
}

/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}

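/*
 * Program one IDMAC channel for the first tile (and the second tile as well
 * when double-buffering): tile geometry and stride, the buffer address(es),
 * U/V offsets for planar formats, rotation mode, burst size and
 * double-buffer mode. When rot_swap_width_height is set the channel
 * transfers to/from the intermediate rotation buffer, so width and height
 * are swapped and the rotated stride is used.
 */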
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 = 0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[0];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = 0;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		width = image->tile[0].height;
		height = image->tile[0].width;
		stride = image->tile[0].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		width = image->tile[0].width;
		height = image->tile[0].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	ipu_cpmem_set_image(channel, &tile_image);

	if (image->fmt->planar && !rot_swap_width_height)
		ipu_cpmem_set_uv_offset(channel,
					image->tile[tile_idx[0]].u_off,
					image->tile[tile_idx[0]].v_off);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ? 8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}

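/*
 * Start a conversion run: set up the IC resize/CSC task for the first tile,
 * program the IDMAC channels, and kick off the transfers. For IRT rotations
 * the IC writes each resized tile to an intermediate buffer and the rotator
 * channels produce the final output, with the IC output channel linked to
 * the rotator input channel.
 */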
static int convert_start(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	enum ipu_color_space src_cs, dest_cs;
	unsigned int dest_width, dest_height;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
	dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[0].height;
		dest_height = d_image->tile[0].width;
	} else {
		dest_width = d_image->tile[0].width;
		dest_height = d_image->tile[0].height;
	}

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init(chan->ic,
			       s_image->tile[0].width,
			       s_image->tile[0].height,
			       dest_width,
			       dest_height,
			       src_cs, dest_cs);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}

/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}

static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}

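/*
 * EOF handler for the current run: either the last tile just finished and
 * the run is moved to done_q, or the buffer addresses for the next tile are
 * programmed. With double-buffering the tile after next is programmed into
 * the idle buffer, since the next tile was already queued on the previous
 * interrupt.
 */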
/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {

		src_tile = &s_image->tile[ctx->next_tile];
		dst_idx = ctx->out_tile_map[ctx->next_tile];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, 0,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, 0,
				     d_image->base.phys0 + dst_tile->offset);
		if (s_image->fmt->planar)
			ipu_cpmem_set_uv_offset(chan->in_chan,
						src_tile->u_off,
						src_tile->v_off);
		if (d_image->fmt->planar)
			ipu_cpmem_set_uv_offset(outch,
						dst_tile->u_off,
						dst_tile->v_off);

		ipu_idmac_select_buffer(chan->in_chan, 0);
		ipu_idmac_select_buffer(outch, 0);

	} else if (ctx->next_tile < ctx->num_tiles - 1) {

		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t norotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this is a rotation operation, just ignore */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

static irqreturn_t rotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this was NOT a rotation operation, shouldn't happen */
		dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}

static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  chan->out_chan,
						  IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->out_eof_irq);
		chan->out_eof_irq = -1;
		goto err;
	}

	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						      chan->rotation_out_chan,
						      IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->rot_out_eof_irq);
		chan->rot_out_eof_irq = -1;
		goto err;
	}

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}

static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	calc_tile_dimensions(ctx, ic_image);
	calc_tile_offsets(ctx, ic_image);

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

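/*
 * Example (illustrative): clamp_align(1366, MIN_W, MAX_W, 5) clamps 1366
 * into [32, 4096] and rounds it to the nearest multiple of 2^5 = 32,
 * giving 1376.
 */
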
/*
 * We have to adjust the tile width such that the tile physaddrs and
 * U and V plane offsets are multiples of 8 bytes as required by
 * the IPU DMA Controller. For the planar formats, this corresponds
 * to a pixel alignment of 16 (but use a more formal equation since
 * the variables are available). For all the packed formats, 8 is
 * good enough.
 */
static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
{
	return fmt->planar ? 8 * fmt->uv_width_dec : 8;
}

/*
 * For tile height alignment, we have to ensure that the output tile
 * heights are multiples of 8 lines if the IRT is required by the
 * given rotation mode (the IRT performs rotations on 8x8 blocks
 * at a time). If the IRT is not used, or for input image tiles,
 * 2 lines are good enough.
 */
static inline u32 tile_height_align(enum ipu_image_convert_type type,
				    enum ipu_rotate_mode rot_mode)
{
	return (type == IMAGE_CONVERT_OUT &&
		ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
}

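/*
 * Example (illustrative): a planar input (uv_width_dec = 2) split into 2
 * columns gives tile_width_align() * num_in_cols = 16 * 2 = 32, so
 * ipu_image_convert_adjust() below aligns the input width to a multiple of
 * 32 pixels (w_align = ilog2(32) = 5).
 */
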
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	unsigned int num_in_rows, num_in_cols;
	unsigned int num_out_rows, num_out_cols;
	u32 w_align, h_align;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* get tiling rows/cols from output format */
	num_out_rows = num_stripes(out->pix.height);
	num_out_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		num_in_rows = num_out_cols;
		num_in_cols = num_out_rows;
	} else {
		num_in_rows = num_out_rows;
		num_in_cols = num_out_cols;
	}

	/* align input width/height */
	w_align = ilog2(tile_width_align(infmt) * num_in_cols);
	h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
			num_in_rows);
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);

	/* align output width/height */
	w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
	h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
			num_out_rows);
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
	in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);

/*
 * This is used by ipu_image_convert_prepare() to verify that the requested
 * input and output images are valid before starting the conversion. Clients
 * can also call it before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);

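/*
 * Typical client usage of this API, as a sketch based on the functions in
 * this file (see also the "canned" ipu_image_convert() helper below); the
 * names my_complete_cb, my_ctx, in_phys and out_phys are placeholders:
 *
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *					&in, &out, rot_mode,
 *					my_complete_cb, my_ctx);
 *	run = kzalloc(sizeof(*run), GFP_KERNEL);
 *	run->ctx = ctx;
 *	run->in_phys = in_phys;
 *	run->out_phys = out_phys;
 *	ipu_image_convert_queue(run);
 *	... my_complete_cb() is called when the run finishes ...
 *	ipu_image_convert_unprepare(ctx);
 *	kfree(run);
 */
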
/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	d_image->num_rows = num_stripes(out->pix.height);
	d_image->num_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		s_image->num_rows = d_image->num_cols;
		s_image->num_cols = d_image->num_rows;
	} else {
		s_image->num_rows = d_image->num_rows;
		s_image->num_cols = d_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
	ctx->rot_mode = rot_mode;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.).
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    d_image->tile[0].size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    d_image->tile[0].size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);

/*
 * Carry out a single image conversion run. Only the physaddrs of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);

/* Abort any active or pending conversions for this context */
void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;
	bool need_abort;

	reinit_completion(&ctx->aborted);

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
		chan->current_run : NULL;

	need_abort = (run_count || active_run);

	ctx->aborting = need_abort;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!need_abort) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs, active run %p\n",
		__func__, chan->ic_task, run_count, active_run);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}

	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);

/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);

/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);

/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);

int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}