/*
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);
/*
 * To handle RAW formats we are re-using the CBY422
 * vpdma data type so that we use the vpdma to re-order
 * the incoming bytes, as the parser assumes that the
 * first byte presented on the bus is the MSB of a 2-byte
 * value.
 * RAW8 handles from 1 to 8 bits
 * RAW16 handles from 9 to 16 bits
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);
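
/*
 * Illustrative sketch (not part of the driver): a capture driver handling
 * 8-bit raw sensor data could simply pick the RAW8 entry above; since its
 * data_type aliases DATA_TYPE_CBY422, VPDMA performs the byte re-ordering
 * described in the comment:
 *
 *	const struct vpdma_data_format *fmt =
 *			&vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8];
 */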
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};
static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}
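
/*
 * Worked example (illustrative): for mask 0x3 and shift S, write_field_reg()
 * performs a read-modify-write that leaves all bits outside (0x3 << S)
 * untouched, so a later read_field_reg(vpdma, reg, 0x3, S) returns exactly
 * the 2-bit value that was written:
 *
 *	write_field_reg(vpdma, reg, 2, 0x3, S);
 *	val = read_field_reg(vpdma, reg, 0x3, S);	// val == 2
 */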
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(LIST_STAT_SYNC);

	/*
	 * dump registers of only group0 and group3, because the VPE channels
	 * lie within the group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients; we could make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);
/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);
void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);
/*
 * map descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
EXPORT_SYMBOL(vpdma_map_desc_buf);
/*
 * unmap descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size,
				DMA_BIDIRECTIONAL);

	buf->mapped = false;
}
EXPORT_SYMBOL(vpdma_unmap_desc_buf);
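
/*
 * Typical buffer lifecycle (illustrative sketch; error handling elided and
 * 'my_buf' is hypothetical):
 *
 *	struct vpdma_buf my_buf;
 *
 *	vpdma_alloc_desc_buf(&my_buf, SZ_4K);	// CPU fills my_buf.addr
 *	vpdma_map_desc_buf(vpdma, &my_buf);	// hand ownership to VPDMA
 *	...					// submit work referencing it
 *	vpdma_unmap_desc_buf(vpdma, &my_buf);	// CPU may touch it again
 *	vpdma_free_desc_buf(&my_buf);
 */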
/*
 * Clean up all pending descriptors of a list.
 * First, stop the current list being processed.
 * If the VPDMA was busy, this step makes it accept newly posted lists.
 * To clean up the internal FSM, post an abort control descriptor for each
 * channel in the @channels array of size @size.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	if (size <= 0 || !channels)
		return 0;

	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		return ret;
	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		return ret;

	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);
/*
 * create a descriptor list; the user of this list will append configuration,
 * control and data descriptors to it, and the list will be submitted to
 * VPDMA. VPDMA's list parser will go through each descriptor and perform the
 * required DMA operations
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;
	list->type = type;

	return 0;
}
EXPORT_SYMBOL(vpdma_create_desc_list);
/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
 * to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);
/*
 * free the buffer allocated for the VPDMA descriptor list; this should be
 * called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
EXPORT_SYMBOL(vpdma_free_desc_list);
bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
EXPORT_SYMBOL(vpdma_list_busy);
/*
 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
			struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);
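
/*
 * End-to-end list usage (illustrative sketch, assuming a driver that owns a
 * hardware list number 'list_num' and has descriptors to append):
 *
 *	struct vpdma_desc_list list;
 *
 *	vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL);
 *	vpdma_add_in_dtd(&list, ...);		// append descriptors
 *	vpdma_map_desc_buf(vpdma, &list.buf);
 *	vpdma_submit_descs(vpdma, &list, list_num);
 *	// later, on the list-complete interrupt:
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);		// reuse for the next frame
 */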
static void dump_dtd(struct vpdma_dtd *dtd);
void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	dtd += idx;
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/* Calculate the write address from the offset of write_dtd from
	 * the start of list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);
void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
		u32 width, u32 height)
{
	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
	    reg_addr != VPDMA_MAX_SIZE3)
		reg_addr = VPDMA_MAX_SIZE1;

	write_field_reg(vpdma, reg_addr, width - 1,
			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);

	write_field_reg(vpdma, reg_addr, height - 1,
			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
}
EXPORT_SYMBOL(vpdma_set_max_size);
static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		class == CFD_CLS_BLOCK ? "simple block" :
		"address data block");

	if (class == CFD_CLS_BLOCK)
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			cfd->dest_addr_offset);

	if (class == CFD_CLS_BLOCK)
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		cfd_get_pkt_type(cfd),
		cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		cfd_get_payload_len(cfd));
}
/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header; this is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_block);
/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format; this is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_adb);
/*
 * the control descriptor format changes based on the type of control
 * descriptor; we only use 'sync on channel' control descriptors for now,
 * so assume it's that type
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}
/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until DMA is completed
 * on the specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
/*
 * append an 'abort_channel' type control descriptor to the given descriptor
 * list; this descriptor aborts any DMA transaction currently using the
 * specified channel
 */
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
		int chan_num)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
				CTD_TYPE_ABORT_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		dtd_get_pkt_type(dtd),
		dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @c_rect: compose params of the output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
			max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @c_rect: crop params of the input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_add_in_dtd);
int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
{
	int i, list_num = -1;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	for (i = 0; i < VPDMA_MAX_NUM_LIST &&
		vpdma->hwlist_used[i] == true; i++)
		;

	if (i < VPDMA_MAX_NUM_LIST) {
		list_num = i;
		vpdma->hwlist_used[i] = true;
		vpdma->hwlist_priv[i] = priv;
	}
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return list_num;
}
EXPORT_SYMBOL(vpdma_hwlist_alloc);
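
/*
 * Illustrative pairing (sketch): a context driver typically grabs a hardware
 * list at open() time and gives it back at release(); 'ctx' is a hypothetical
 * driver-private pointer:
 *
 *	list_num = vpdma_hwlist_alloc(vpdma, ctx);
 *	if (list_num < 0)
 *		return -EBUSY;			// all lists in use
 *	...
 *	vpdma_hwlist_release(vpdma, list_num);
 */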
void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
{
	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
		return NULL;

	return vpdma->hwlist_priv[list_num];
}
EXPORT_SYMBOL(vpdma_hwlist_get_priv);
void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
{
	void *priv;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	vpdma->hwlist_used[list_num] = false;
	priv = vpdma->hwlist_priv[list_num];
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return priv;
}
EXPORT_SYMBOL(vpdma_hwlist_release);
/* set or clear the mask for the list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
		int list_num, bool enable)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
	u32 val;

	val = read_reg(vpdma, reg_addr);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
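
/*
 * Interrupt-side usage (illustrative sketch): a driver's irq handler would
 * read the list status, handle the completed list, then ack it using the
 * helpers defined below:
 *
 *	irqst = vpdma_get_list_stat(vpdma, irq_num);
 *	if (irqst & (1 << (list_num * 2))) {
 *		vpdma_clear_list_stat(vpdma, irq_num, list_num);
 *		// list 'list_num' is done; unmap/reset its descriptor list
 *	}
 */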
/* get the LIST_STAT register */
unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_stat);
/* get the LIST_MASK register */
unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_mask);
/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
		int list_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
}
EXPORT_SYMBOL(vpdma_clear_list_stat);
void vpdma_set_bg_color(struct vpdma_data *vpdma,
		struct vpdma_data_format *fmt, u32 color)
{
	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
		write_reg(vpdma, VPDMA_BG_RGB, color);
	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
		write_reg(vpdma, VPDMA_BG_YUV, color);
}
EXPORT_SYMBOL(vpdma_set_bg_color);
/*
 * configures the output mode of the line buffer for the given client; the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_line_mode);
/*
 * configures the event which should trigger VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_frame_start_event);
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		goto rel_fw;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}
	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}
int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma->pdev = pdev;
	vpdma->cb = cb;
	spin_lock_init(&vpdma->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return -ENODEV;
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return -ENOMEM;
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}
EXPORT_SYMBOL(vpdma_create);
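
/*
 * Probe-time usage (illustrative sketch): vpdma_create() returns before the
 * firmware has loaded, so a client driver defers the rest of its init to the
 * callback; the names below are hypothetical:
 *
 *	static void my_vpdma_ready(struct platform_device *pdev)
 *	{
 *		// firmware is up; safe to submit lists now
 *	}
 *
 *	r = vpdma_create(pdev, &dev->vpdma, my_vpdma_ready);
 */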
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
MODULE_LICENSE("GPL v2");