/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YC422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CY422,
		.depth		= 16,
	},
};
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
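
/*
 * A worked example of what .depth means (values hypothetical): depth is the
 * buffer's bits per pixel, so one 1920-pixel line of packed YC422 (16 bpp)
 * occupies (16 * 1920) >> 3 = 3840 bytes before the stride is rounded up to
 * VPDMA_STRIDE_ALIGN, while the same line of a C420 chroma plane (4 bpp on
 * average) needs only 960 bytes. This is exactly the math that
 * vpdma_add_in_dtd() and vpdma_add_out_dtd() below use to compute the line
 * stride and the start address of a cropped rectangle.
 */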
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};
static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};
static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}
static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}
static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}
static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}
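
/*
 * A worked example of the read-modify-write above (hypothetical values):
 * with the register holding 0xffff, write_field_reg(vpdma, offset, 0x2, 0x3, 8)
 * computes (0xffff & ~(0x3 << 8)) | ((0x2 & 0x3) << 8), i.e.
 * 0xfcff | 0x0200 = 0xfeff, so only the field in bits 9:8 changes.
 */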
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dump only the group0 and group3 interrupt registers, because the
	 * VPE channels lie within group0 and group3
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients; we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
/*
 * allocate a DMA buffer for a descriptor list or payload
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	buf->mapped = false;

	return 0;
}
void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
/*
 * map a descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
/*
 * unmap a descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);

	buf->mapped = false;
}
/*
 * create a descriptor list; the user of the list appends configuration,
 * control and data descriptors to it and then submits it to VPDMA. VPDMA's
 * list parser walks each descriptor and performs the required DMA operations
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;

	list->type = type;

	return 0;
}
/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
 * which allows new descriptors to be added to the list
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
/*
 * free the buffer allocated for the VPDMA descriptor list; this should be
 * called when the user doesn't want to use VPDMA any more
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
/*
 * submit a list of DMA descriptors to the VPE VPDMA, without waiting for
 * completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
{
	/* we always use the first list */
	int list_num = 0;
	int list_size;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);

	return 0;
}
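
/*
 * A minimal usage sketch of the list API above (illustrative only, never
 * called by this driver): the 1024-byte size and VPDMA_LIST_TYPE_NORMAL list
 * type are example values; real clients size the buffer for the descriptors
 * they intend to append.
 */
static __maybe_unused int vpdma_list_lifecycle_sketch(struct vpdma_data *vpdma)
{
	struct vpdma_desc_list list;
	int r;

	r = vpdma_create_desc_list(&list, 1024, VPDMA_LIST_TYPE_NORMAL);
	if (r)
		return r;

	/* ... append config/control/data descriptors with vpdma_add_*() ... */

	/* flush the CPU-written descriptors, then kick the list parser */
	r = vpdma_map_desc_buf(vpdma, &list.buf);
	if (!r)
		r = vpdma_submit_descs(vpdma, &list);

	/* once the list completes, recycle it for the next set of descs */
	vpdma_unmap_desc_buf(vpdma, &list.buf);
	vpdma_reset_desc_list(&list);

	vpdma_free_desc_list(&list);
	return r;
}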
static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		class == CFD_CLS_BLOCK ? "simple block" :
		"address data block");

	if (class == CFD_CLS_BLOCK)
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			cfd->dest_addr_offset);

	if (class == CFD_CLS_BLOCK)
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		cfd_get_pkt_type(cfd),
		cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		cfd_get_payload_len(cfd));
}
/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header; this is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format; this is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
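
/*
 * An illustrative sketch of a caller (hypothetical, not part of this file):
 * a client such as the VPE driver uses the ADB variant above to program its
 * modules' MMRs, roughly like this. CFD_MMR_CLIENT comes from vpdma_priv.h;
 * the adb buffer is assumed to have been filled in and sized by the caller.
 */
static __maybe_unused void vpdma_mmr_config_sketch(struct vpdma_desc_list *list,
		struct vpdma_buf *adb)
{
	/* stage an indirect config descriptor pointing at the ADB payload */
	vpdma_add_cfd_adb(list, CFD_MMR_CLIENT, adb);
}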
/*
 * the control descriptor format changes based on the type of control
 * descriptor; we only use 'sync on channel' control descriptors for now,
 * so assume that's what we're dumping
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}
/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until DMA is completed on the
 * specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		dtd_get_pkt_type(dtd),
		dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @c_rect: compose params of the output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	int stride;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
					MAX_OUT_HEIGHT_1080);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @c_rect: crop params of the input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int stride;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
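
/*
 * An illustrative sketch of the DTD helpers above (hypothetical geometry and
 * channel choices, not part of this driver): queue one progressive packed
 * YUV422 frame into the deinterlacer's LUMA1 input and the processed frame
 * back out to memory. Real callers derive the width, rects and formats from
 * their V4L2 buffer setup.
 */
static __maybe_unused void vpdma_dtd_usage_sketch(struct vpdma_desc_list *list,
		dma_addr_t src, dma_addr_t dst)
{
	struct v4l2_rect full = {
		.left = 0, .top = 0, .width = 1920, .height = 1080,
	};

	/* memory -> client: frame and crop both cover the full 1920x1080 */
	vpdma_add_in_dtd(list, 1920, &full,
		&vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422], src,
		VPE_CHAN_LUMA1_IN, 0, 0, 1920, 1080, 0, 0);

	/* client -> memory: compose the result at the same position */
	vpdma_add_out_dtd(list, 1920, &full,
		&vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422], dst,
		VPE_CHAN_LUMA_OUT, 0);
}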
/* set or clear the mask for the list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
		bool enable)
{
	u32 val;

	val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
}
/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma)
{
	write_reg(vpdma, VPDMA_INT_LIST0_STAT,
		read_reg(vpdma, VPDMA_INT_LIST0_STAT));
}
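
/*
 * A sketch of the intended interrupt flow (hypothetical caller, not part of
 * this file): unmask the list 0 complete interrupt before submitting, and
 * clear the latched status bits from the client's interrupt handler before
 * the list is reused.
 */
static __maybe_unused void vpdma_list_irq_sketch(struct vpdma_data *vpdma)
{
	/* unmask the 'list 0 complete' interrupt */
	vpdma_enable_list_complete_irq(vpdma, 0, true);

	/* ... later, in the client's irq handler ... */
	vpdma_clear_list_stat(vpdma);
}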
/*
 * configures the output mode of the line buffer for the given client; the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
/*
 * configures the event which should trigger the VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
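
/*
 * An illustrative sketch (hypothetical channel and mode choices): a client
 * typically programs the frame start event, and where needed the line mode,
 * on each channel it uses before submitting any lists.
 */
static __maybe_unused void vpdma_client_setup_sketch(struct vpdma_data *vpdma)
{
	/* start transfers as soon as the channel becomes active */
	vpdma_set_frame_start_event(vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA1_IN);
	vpdma_set_frame_start_event(vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA1_IN);

	/* pass line buffer content through unchanged (mode 0 is illustrative) */
	vpdma_set_line_mode(vpdma, 0, VPE_CHAN_CHROMA1_IN);
}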
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}
struct vpdma_data *vpdma_create(struct platform_device *pdev,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	struct vpdma_data *vpdma;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
	if (!vpdma) {
		dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
		return ERR_PTR(-ENOMEM);
	}

	vpdma->pdev = pdev;
	vpdma->cb = cb;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return ERR_PTR(-ENODEV);
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return ERR_PTR(-ENOMEM);
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return ERR_PTR(r);
	}

	return vpdma;
}

MODULE_FIRMWARE(VPDMA_FIRMWARE);