// SPDX-License-Identifier: GPL-2.0-only
/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"

const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_CB420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CB420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);

const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);

/*
 * To handle RAW format we are re-using the CBY422
 * vpdma data type so that we use the vpdma to re-order
 * the incoming bytes, as the parser assumes that the
 * first byte presented on the bus is the MSB of a 2-byte
 * value.
 * RAW8 handles from 1 to 8 bits
 * RAW16 handles from 9 to 16 bits
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);

const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);

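/*
 * Illustrative sketch (not part of the original driver): a client picks the
 * vpdma_data_format entry that matches its buffer and hands it to the data
 * transfer descriptor helpers below, e.g. for the luma plane of a YUV 4:2:0
 * buffer:
 *
 *	const struct vpdma_data_format *fmt =
 *			&vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420];
 *
 *	vpdma_add_in_dtd(list, width, stride, &crop, fmt, dma_addr,
 *			 VPE_CHAN_LUMA1_IN, field, flags,
 *			 frame_width, frame_height, 0, 0);
 */
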
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};

static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}

void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dumping registers of only group0 and group3, because VPE channels
	 * lie within group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients, we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);

/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);

/*
 * map descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
EXPORT_SYMBOL(vpdma_map_desc_buf);

/*
 * unmap descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size,
				DMA_BIDIRECTIONAL);

	buf->mapped = false;
}
EXPORT_SYMBOL(vpdma_unmap_desc_buf);

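/*
 * Illustrative ownership rule for the helpers above (a sketch, not part of
 * the original driver): the CPU may only touch buf->addr while the buffer is
 * unmapped, and VPDMA may only use buf->dma_addr while it is mapped:
 *
 *	struct vpdma_buf buf;
 *
 *	vpdma_alloc_desc_buf(&buf, size);	// CPU owns buf.addr
 *	// ... fill buf.addr ...
 *	vpdma_map_desc_buf(vpdma, &buf);	// hand over to VPDMA
 *	// ... hardware reads/writes via buf.dma_addr ...
 *	vpdma_unmap_desc_buf(vpdma, &buf);	// hand back to the CPU
 *	vpdma_free_desc_buf(&buf);
 */
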
/*
 * Cleanup all pending descriptors of a list
 * First, stop the current list being processed.
 * If the VPDMA was busy, this step makes it accept newly posted lists.
 * To clean up the internal FSM, post an abort control descriptor for all
 * the channels from the @channels array of size @size.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	if (size <= 0 || !channels)
		return 0;

	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		goto free_desc;

	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		goto unmap_desc;

	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

unmap_desc:
	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
free_desc:
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);

/*
 * create a descriptor list; the user of this list will append configuration,
 * control and data descriptors to it, and the list will then be submitted to
 * VPDMA. VPDMA's list parser goes through each descriptor and performs the
 * required DMA operations
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;

	list->type = type;

	return 0;
}
EXPORT_SYMBOL(vpdma_create_desc_list);

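/*
 * Illustrative list lifecycle built from the helpers in this file (a sketch;
 * error handling elided, and 'list_num' would come from vpdma_hwlist_alloc()):
 *
 *	struct vpdma_desc_list list;
 *
 *	vpdma_create_desc_list(&list, size, VPDMA_LIST_TYPE_NORMAL);
 *	vpdma_add_sync_on_channel_ctd(&list, VPE_CHAN_LUMA1_IN);
 *	// ... append more config/control/data descriptors ...
 *	vpdma_map_desc_buf(vpdma, &list.buf);
 *	vpdma_submit_descs(vpdma, &list, list_num);
 *	// ... on the list complete interrupt:
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);	// reuse, or vpdma_free_desc_list()
 */
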
/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
 * to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);

/*
 * free the buffer allocated for the VPDMA descriptor list, this should be
 * called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
EXPORT_SYMBOL(vpdma_free_desc_list);

bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
EXPORT_SYMBOL(vpdma_list_busy);

/*
 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
		       struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);

static void dump_dtd(struct vpdma_dtd *dtd);

void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	dtd += idx;
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/* Calculate write address from the offset of write_dtd from start
	 * of the list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);

void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
		u32 width, u32 height)
{
	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
	    reg_addr != VPDMA_MAX_SIZE3)
		reg_addr = VPDMA_MAX_SIZE1;

	write_field_reg(vpdma, reg_addr, width - 1,
			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);

	write_field_reg(vpdma, reg_addr, height - 1,
			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
}
EXPORT_SYMBOL(vpdma_set_max_size);

static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		 class == CFD_CLS_BLOCK ? "simple block" :
		 "address data block");

	if (class == CFD_CLS_BLOCK)
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			 cfd->dest_addr_offset);

	if (class == CFD_CLS_BLOCK)
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		 cfd_get_pkt_type(cfd),
		 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		 cfd_get_payload_len(cfd));
}

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header; this is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_block);

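/*
 * Illustrative use of vpdma_add_cfd_block() (a sketch, not from the original
 * driver; 'sc_client', 'coeff_table' and 'coeff_size' are placeholders for a
 * real client id and coefficient table):
 *
 *	struct vpdma_buf coeff;
 *
 *	vpdma_alloc_desc_buf(&coeff, coeff_size);
 *	memcpy(coeff.addr, coeff_table, coeff_size);
 *	vpdma_map_desc_buf(vpdma, &coeff);
 *	vpdma_add_cfd_block(list, sc_client, &coeff, dest_offset);
 */
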
/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format; this is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_adb);

/*
 * control descriptor formats change based on what type of control descriptor
 * it is; we only use 'sync on channel' control descriptors for now, so assume
 * it's that
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		 ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}

/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until DMA is completed on the
 * specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);

/*
 * append an 'abort_channel' type control descriptor to the given descriptor
 * list; this descriptor aborts any DMA transaction happening using the
 * specified channel
 */
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
		int chan_num)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
				CTD_TYPE_ABORT_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);

static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		 dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		 dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		 dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		 dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			 dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %x\n", dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			 dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			 dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			 dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			 dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			 dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}

/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @stride: line stride of the image in memory
 * @c_rect: compose params of output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);

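/*
 * Illustrative call (a sketch with placeholder parameters): writing the
 * processed frame to memory through the luma output channel, where 'max_w'
 * and 'max_h' are the max-size enum selectors described above:
 *
 *	vpdma_add_out_dtd(list, width, stride, &compose_rect, fmt, dma_addr,
 *			  max_w, max_h, VPE_CHAN_LUMA_OUT, flags);
 */
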
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    (fmt->data_type == DATA_TYPE_C420 ||
	     fmt->data_type == DATA_TYPE_CB420)) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				   DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);

/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @stride: line stride of the image in memory
 * @c_rect: crop params of input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    (fmt->data_type == DATA_TYPE_C420 ||
	     fmt->data_type == DATA_TYPE_CB420)) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				   DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_add_in_dtd);

int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
{
	int i, list_num = -1;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	for (i = 0; i < VPDMA_MAX_NUM_LIST &&
	    vpdma->hwlist_used[i] == true; i++)
		;

	if (i < VPDMA_MAX_NUM_LIST) {
		list_num = i;
		vpdma->hwlist_used[i] = true;
		vpdma->hwlist_priv[i] = priv;
	}

	spin_unlock_irqrestore(&vpdma->lock, flags);

	return list_num;
}
EXPORT_SYMBOL(vpdma_hwlist_alloc);

void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
{
	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
		return NULL;

	return vpdma->hwlist_priv[list_num];
}
EXPORT_SYMBOL(vpdma_hwlist_get_priv);

void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
{
	void *priv;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	vpdma->hwlist_used[list_num] = false;
	priv = vpdma->hwlist_priv[list_num];
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return priv;
}
EXPORT_SYMBOL(vpdma_hwlist_release);

/* set or clear the mask for list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
		int list_num, bool enable)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
	u32 val;

	val = read_reg(vpdma, reg_addr);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);

/* get the LIST_STAT register */
unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_stat);

/* get the LIST_MASK register */
unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_mask);

/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
			   int list_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
}
EXPORT_SYMBOL(vpdma_clear_list_stat);

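/*
 * Illustrative interrupt bookkeeping (a hypothetical handler fragment, not
 * part of this file) using the helpers above; each list owns two consecutive
 * bits in the per-irq STAT/MASK registers, hence the '* 2' shifts:
 *
 *	irqst = vpdma_get_list_stat(vpdma, irq_num);
 *	if (irqst & (1 << (list_num * 2))) {
 *		vpdma_clear_list_stat(vpdma, irq_num, list_num);
 *		// ... descriptor list 'list_num' has completed ...
 *	}
 */
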
void vpdma_set_bg_color(struct vpdma_data *vpdma,
		struct vpdma_data_format *fmt, u32 color)
{
	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
		write_reg(vpdma, VPDMA_BG_RGB, color);
	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
		write_reg(vpdma, VPDMA_BG_YUV, color);
}
EXPORT_SYMBOL(vpdma_set_bg_color);

/*
 * configures the output mode of the line buffer for the given client; the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
			VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_line_mode);

/*
 * configures the event which should trigger VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
			VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_frame_start_event);

static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			   VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				   VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}

static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}

int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma->pdev = pdev;
	vpdma->cb = cb;
	spin_lock_init(&vpdma->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return -ENODEV;
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return -ENOMEM;
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}
EXPORT_SYMBOL(vpdma_create);

MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
MODULE_LICENSE("GPL v2");