// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <media/drv-intf/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev *saa, u32 palette, u32 *clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set these bits new */
	*clip_format |= ((((palette & 0xf00) >> 8) << 30) | ((palette & 0x00f) << 24) | (((palette & 0x0f0) >> 4) << 16));
}

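/*
 * Note on the packing above (derived from the shifts in the function, not
 * from the datasheet): the low palette nibble lands at bits 27:24, the middle
 * nibble at bits 19:16, and the high nibble at bits 31:30 (only its low two
 * bits survive the 32-bit shift), while the lower word of the clip-format
 * register is preserved.  As a purely illustrative example, palette == 0x123
 * would contribute (0x1 << 30) | (0x3 << 24) | (0x2 << 16).
 */
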
static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32 *hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

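/*
 * As implied by the shifts above, the source select occupies bits 31:30 of
 * HPS_CTRL and the sync select sits at bit 28; the corresponding mask bits
 * are cleared first so a stale selection cannot leak through.
 */
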
static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32 *hps_h_scale, u32 *hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale &= ~(MASK_B0 | 0xf00);
	*hps_h_scale |= (hxo << 0);

	*hps_ctrl &= ~(MASK_W0 | MASK_B2);
	*hps_ctrl |= (hyo << 12);
}

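/*
 * Derived from the masks and shifts above: the horizontal offset hxo goes
 * into the lowest bits of HPS_H_SCALE (the 12-bit region cleared by
 * MASK_B0 | 0xf00), while the vertical offset hyo is written starting at
 * bit 12 of HPS_CTRL after its low 24 bits have been cleared.  Datasheet
 * field names are not repeated here.
 */
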
/* helper functions for the calculation of the horizontal and vertical
   scaling registers, clip-format register etc.
   these functions take pointers to the (most likely read-out
   original values) and manipulate them according to the requested
   new scaling parameters. */

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab[] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};

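/*
 * hps_h_coeff_tab is indexed with min(xpsc - 1, 63) in
 * calculate_h_scale_registers() below, i.e. entry 0 corresponds to a
 * prescale of 1 and entry 63 to the maximum prescale of 64; the weight_sum
 * of the selected entry is matched against h_attenuation[] to pick the DC
 * gain value (dcgx).  This note only summarises the lookups further down.
 */
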
/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0 };

/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32 *hps_ctrl, u32 *hps_v_gain, u32 *hps_h_prescale, u32 *hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;

	/* helper variables */
	u32 h_atten = 0, i = 0;

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						...			*/
	if (in_x > out_x)
		xpsc = in_x / out_x;
	else
		xpsc = 1;

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if (0 != flip_lr) {
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384)
			xpsc++;
	} else {
		/* if zooming is wanted, number of pixels after
		   horizontal prescaling must be < 768 */
		while (in_x / xpsc >= 768)
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if (xpsc > 64)
		xpsc = 64;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[min(xpsc - 1, 63)].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ((in_x == out_x) && (1 == xpsc))
		xsci = 0x400;
	else
		xsci = ((1024 * in_x) / (out_x * xpsc)) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if (0x400 == xsci)
		xim = 1;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if (1 == xpsc) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[min(xpsc - 1, 63)].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}
		dcgx = i;
	}

	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if (xsci == 0x400)
		pfuv = 0x00;
	else if (xsci < 0x600)
		pfuv = 0x01;
	else if (xsci < 0x680)
		pfuv = 0x11;
	else if (xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain &= MASK_W0 | MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale |= (dcgx << 27) | ((xpsc - 1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}

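/*
 * Worked example for the function above (illustrative only, and using the
 * prescale initialisation as reconstructed here): capturing 720 input pixels
 * into a 360 pixel window with flip_lr == 0 gives xpsc = 720 / 360 = 2; since
 * 720 / 2 = 360 < 768 the while loop never runs, and the fine-scale increment
 * becomes xsci = (1024 * 720) / (360 * 2) + 2 = 1026, i.e. just above the
 * 0x400 "no scaling" point, with the actual 2:1 reduction done by the
 * prescaler.
 */
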
/* hps_coeff used for CYA and CYB; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab[] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0 };

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32 *hps_v_scale, u32 *hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if (in_y < out_y)
		return -EINVAL;

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */
	if (V4L2_FIELD_HAS_BOTH(field)) {
		if (2 * out_y >= in_y)
			lpi = 1;
	} else if (field == V4L2_FIELD_TOP
		   || field == V4L2_FIELD_ALTERNATE
		   || field == V4L2_FIELD_BOTTOM) {
		if (4 * out_y >= in_y)
			lpi = 1;
	}

	if (0 != lpi) {
		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if (in_y > out_y)
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);
	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						...			*/
		yacl = (ysci / (1024 - ysci));

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[min(yacl, 63)].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[min(yacl, 63)].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}
		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0 | MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}

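/*
 * Field layout of HPS_V_SCALE as written above, derived purely from the
 * shifts (datasheet names not verified here): yacm at bit 31, ysci at bits
 * 30:21, yacl at bits 20:15, ypo at bits 14:8 and ype at bits 7:1; dcgy and
 * cya_cyb share HPS_V_GAIN, at bits 16 upwards and 15:0 respectively.
 */
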
/* calculate and program the scaler registers for the requested capture window */
static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN); */
	calculate_v_scale_registers(dev, field, vv->standard->v_field * 2, height,
				    &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_h_prescale = 0; /* all bits get set in the function */
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip,
				    &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE, hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN, hps_v_gain);
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE, hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE, hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22));
}

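/*
 * Note: hps_v_gain is deliberately passed to both calculator helpers above;
 * judging by the masks used, calculate_v_scale_registers() fills its low
 * 24 bits (cya_cyb and dcgy) while calculate_h_scale_registers() preserves
 * that region and only writes the top byte (the pfuv chroma filter setting),
 * so the two updates do not clobber each other.
 */
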
static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev, palette, &clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~(MASK_31 | MASK_30 | MASK_28);
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);

void saa7146_write_out_dma(struct saa7146_dev *dev, int which, struct saa7146_video_dma *vdma)
{
	int where = 0;

	if (which < 1 || which > 3)
		return;

	/* calculate starting address */
	where = (which - 1) * 0x18;

	saa7146_write(dev, where, vdma->base_odd);
	saa7146_write(dev, where + 0x04, vdma->base_even);
	saa7146_write(dev, where + 0x08, vdma->prot_addr);
	saa7146_write(dev, where + 0x0c, vdma->pitch);
	saa7146_write(dev, where + 0x10, vdma->base_page);
	saa7146_write(dev, where + 0x14, vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02 << (which - 1)) | (MASK_18 << (which - 1)));

	pr_debug("vdma%d.base_even:     0x%08x\n", which, vdma->base_even);
	pr_debug("vdma%d.base_odd:      0x%08x\n", which, vdma->base_odd);
	pr_debug("vdma%d.prot_addr:     0x%08x\n", which, vdma->prot_addr);
	pr_debug("vdma%d.base_page:     0x%08x\n", which, vdma->base_page);
	pr_debug("vdma%d.pitch:         0x%08x\n", which, vdma->pitch);
	pr_debug("vdma%d.num_line_byte: 0x%08x\n", which, vdma->num_line_byte);
}

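/*
 * The register block written above starts at offsets 0x00/0x18/0x30 for DMA
 * engines 1..3 and holds, in order: base_odd, base_even, prot_addr, pitch,
 * base_page and num_line_byte (one 32-bit register each); the final MC2
 * write uploads the shadowed values for the selected engine.  This summary
 * is derived from the offsets used in the function above.
 */
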
static int calculate_video_dma_grab_packed(struct saa7146_dev *dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_video_dma vdma1;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);

	int width = pix->width;
	int height = pix->height;
	int bytesperline = pix->bytesperline;
	enum v4l2_field field = pix->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if (bytesperline != 0) {
		vdma1.pitch = bytesperline * 2;
	} else {
		vdma1.pitch = (width * depth * 2) / 8;
	}
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = buf->pt[0].offset + (vdma1.pitch / 2) * height;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = buf->pt[0].offset + (vdma1.pitch / 2) * height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
		/* both fields are captured: nothing to adjust */
	} else if (field == V4L2_FIELD_ALTERNATE) {
		if (vv->last_field == V4L2_FIELD_TOP) {
			vdma1.base_odd = vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
			vdma1.base_odd = vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if (0 != vv->vflip)
		vdma1.pitch *= -1;

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}

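/*
 * Note on the vertical flip handling above: with vflip set, prot_addr points
 * at the start of the buffer while base_even/base_odd are set to the last
 * lines, and the pitch is negated (as reconstructed here) so the DMA engine
 * walks the buffer bottom-up.  This reading follows from the address
 * arithmetic in the function; it is not a datasheet quote.
 */
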
static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf,
			   struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	struct v4l2_pix_format *pix = &vv->video_fmt;
	int height = pix->height;
	int width = pix->width;

	vdma2->pitch = width;
	vdma3->pitch = width;

	/* fixme: look at bytesperline! */

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[1].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[2].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch / 2);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch / 2);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}

	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf,
			   struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	struct v4l2_pix_format *pix = &vv->video_fmt;
	int height = pix->height;
	int width = pix->width;

	vdma2->pitch = width / 2;
	vdma3->pitch = width / 2;

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[2].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[2].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[1].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[1].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}

	return 0;
}

static int calculate_video_dma_grab_planar(struct saa7146_dev *dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);

	int width = pix->width;
	int height = pix->height;
	enum v4l2_field field = pix->field;

	if (WARN_ON(!buf->pt[0].dma) ||
	    WARN_ON(!buf->pt[1].dma) ||
	    WARN_ON(!buf->pt[2].dma))
		return -1;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here? The offsets are
	   most likely wrong, this version here only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary... */

	vdma1.pitch = width * 2;
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = ((vdma1.pitch / 2) * height) + buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = (vdma1.pitch / 2) * height + buf->pt[0].offset;
	}

	vdma2.num_line_byte = 0; /* unused */
	vdma2.base_page = buf->pt[1].dma | ME1;

	vdma3.num_line_byte = 0; /* unused */
	vdma3.base_page = buf->pt[2].dma | ME1;

	switch (sfmt->depth) {
	case 12:
		calc_planar_420(vv, buf, &vdma2, &vdma3);
		break;
	case 16:
		calc_planar_422(vv, buf, &vdma2, &vdma3);
		break;
	default:
		return -1;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
		/* both fields are captured: nothing to adjust */
	} else if (field == V4L2_FIELD_ALTERNATE) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if ((sfmt->flags & FORMAT_BYTE_SWAP) != 0) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}

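/*
 * The FORMAT_BYTE_SWAP check above swaps which hardware DMA engine (2 or 3)
 * receives the vdma2/vdma3 descriptors, which effectively exchanges the two
 * chroma planes for formats that store them in the opposite order.  This
 * interpretation follows from the code; the flag itself comes from the
 * saa7146 format table.
 */
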
static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2 / 4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_06 | MASK_22);			/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);			/* => values */

	if (planar != 0) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);		/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);		/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);		/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);		/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if (vv->last_field == V4L2_FIELD_INTERLACED) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_22 | MASK_06);			/* => mask */
	WRITE_RPS0(MASK_22);				/* => values */

	if (planar != 0) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);		/* => mask */
		WRITE_RPS0(MASK_21);			/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);		/* => mask */
		WRITE_RPS0(MASK_20);			/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}

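/*
 * Shape of the RPS program assembled above: wait for the requested field
 * edge (skipped when RPS signal 0 is already set), raise signal 0, switch
 * the needed video DMA engines on via MC1, wait one field or frame period,
 * switch them off again, raise an interrupt and stop.  The CMD_* semantics
 * are inferred from the macro names only; consult the SAA7146 documentation
 * for the authoritative description.
 */
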
/* disable clipping */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* mask out relevant bits (=lower word) */
	clip_format &= MASK_W1;

	/* upload clipping-registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}

void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct v4l2_pix_format *pix = &vv->video_fmt;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat);
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if (0 == vdma1_prot_addr) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27);
	}

	saa7146_set_window(dev, pix->width, pix->height, pix->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	if (vv->last_field == V4L2_FIELD_INTERLACED) {
		/* nothing to toggle */
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if (0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev, 1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev, 0);
	}

	pr_debug("vdma%d.base_even:     0x%08x\n", 1, saa7146_read(dev, BASE_EVEN1));
	pr_debug("vdma%d.base_odd:      0x%08x\n", 1, saa7146_read(dev, BASE_ODD1));
	pr_debug("vdma%d.prot_addr:     0x%08x\n", 1, saa7146_read(dev, PROT_ADDR1));
	pr_debug("vdma%d.base_page:     0x%08x\n", 1, saa7146_read(dev, BASE_PAGE1));
	pr_debug("vdma%d.pitch:         0x%08x\n", 1, saa7146_read(dev, PITCH1));
	pr_debug("vdma%d.num_line_byte: 0x%08x\n", 1, saa7146_read(dev, NUM_LINE_BYTE1));
	pr_debug("vdma%d => vptr:       0x%08x\n", 1, saa7146_read(dev, PCI_VDP1));

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}