// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 HVS module.
 *
 * The Hardware Video Scaler (HVS) is the piece of hardware that does
 * translation, scaling, colorspace conversion, and compositing of
 * pixels stored in framebuffers into a FIFO of pixels going out to
 * the Pixel Valve (CRTC). It operates at the system clock rate (the
 * system audio clock gate, specifically), which is much higher than
 * the pixel clock rate.
 *
 * There is a single global HVS, with multiple output FIFOs that can
 * be consumed by the PVs. This file just manages the resources for
 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
 * each CRTC.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>

#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vc4_drv.h"
#include "vc4_regs.h"
static const struct debugfs_reg32 vc4_hvs_regs[] = {
	VC4_REG32(SCALER_DISPCTRL),
	VC4_REG32(SCALER_DISPSTAT),
	VC4_REG32(SCALER_DISPID),
	VC4_REG32(SCALER_DISPECTRL),
	VC4_REG32(SCALER_DISPPROF),
	VC4_REG32(SCALER_DISPDITHER),
	VC4_REG32(SCALER_DISPEOLN),
	VC4_REG32(SCALER_DISPLIST0),
	VC4_REG32(SCALER_DISPLIST1),
	VC4_REG32(SCALER_DISPLIST2),
	VC4_REG32(SCALER_DISPLSTAT),
	VC4_REG32(SCALER_DISPLACT0),
	VC4_REG32(SCALER_DISPLACT1),
	VC4_REG32(SCALER_DISPLACT2),
	VC4_REG32(SCALER_DISPCTRL0),
	VC4_REG32(SCALER_DISPBKGND0),
	VC4_REG32(SCALER_DISPSTAT0),
	VC4_REG32(SCALER_DISPBASE0),
	VC4_REG32(SCALER_DISPCTRL1),
	VC4_REG32(SCALER_DISPBKGND1),
	VC4_REG32(SCALER_DISPSTAT1),
	VC4_REG32(SCALER_DISPBASE1),
	VC4_REG32(SCALER_DISPCTRL2),
	VC4_REG32(SCALER_DISPBKGND2),
	VC4_REG32(SCALER_DISPSTAT2),
	VC4_REG32(SCALER_DISPBASE2),
	VC4_REG32(SCALER_DISPALPHA2),
	VC4_REG32(SCALER_OLEDOFFS),
	VC4_REG32(SCALER_OLEDCOEF0),
	VC4_REG32(SCALER_OLEDCOEF1),
	VC4_REG32(SCALER_OLEDCOEF2),
};
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
	int idx, i;

	if (!drm_dev_enter(drm, &idx))
		return;

	drm_print_regset32(&p, &hvs->regset);

	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 readl((u32 __iomem *)hvs->dlist + i + 0),
			 readl((u32 __iomem *)hvs->dlist + i + 1),
			 readl((u32 __iomem *)hvs->dlist + i + 2),
			 readl((u32 __iomem *)hvs->dlist + i + 3));
	}

	drm_dev_exit(idx);
}
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

	return 0;
}
static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int dlist_mem_size = hvs->dlist_mem_size;
	unsigned int next_entry_start;
	unsigned int i, j;
	u32 dlist_word, dispstat;

	for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
		dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(i)),
					 SCALER_DISPSTATX_MODE);
		if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
		    dispstat == SCALER_DISPSTATX_MODE_EOF) {
			drm_printf(&p, "HVS chan %u disabled\n", i);
			continue;
		}

		drm_printf(&p, "HVS chan %u:\n", i);
		next_entry_start = 0;

		for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) {
			dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
			drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
				   dlist_word);
			if (!next_entry_start ||
			    next_entry_start == j) {
				if (dlist_word & SCALER_CTL0_END)
					break;
				next_entry_start = j +
					VC4_GET_FIELD(dlist_word,
						      SCALER_CTL0_SIZE);
			}
		}
	}

	return 0;
}
/* The filter kernel is composed of dwords each containing 3 9-bit
 * signed integers packed next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))
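
/* Worked packing example (illustration, not part of the original source):
 * VC4_PPF_FILTER_WORD(0, -2, -6) masks each coefficient to 9-bit two's
 * complement and packs them side by side:
 *    0 -> 0x000 << 0  = 0x00000000
 *   -2 -> 0x1fe << 9  = 0x0003fc00
 *   -6 -> 0x1fa << 18 = 0x07e80000
 * giving a packed dword of 0x07ebfc00.
 */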
/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords. This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}
#define VC4_LINEAR_PHASE_KERNEL_DWORDS	6
#define VC4_KERNEL_DWORDS	(VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
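
/* Sanity note: VC4_KERNEL_DWORDS works out to 6 * 2 - 1 = 11. The last
 * dword of the linear-phase half holds the shared center tap (c15, c15),
 * so mirroring the first five dwords after the six uploaded ones rebuilds
 * the full symmetric kernel without duplicating that center dword.
 */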
/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
				50, 82, 119, 155, 187, 213, 227);
static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	/*
	 * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
	 * here since that function is only called from vc4_hvs_bind().
	 */

	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		drm_err(&hvs->vc4->base, "Failed to allocate space for filter kernel: %d\n",
			ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
			writel(kernel[i], &dst_kernel[i]);
		else
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
	}

	return 0;
}
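
/* Example of the mirroring above (illustrative): with
 * VC4_KERNEL_DWORDS = 11, iterations i = 6..10 write
 * kernel[11 - 6 - 1] = kernel[4] down to kernel[0], so the uploaded
 * sequence is k0..k5 followed by k4..k0, matching the hardware's
 * "coefficients up, pad, coefficients back down" layout described
 * above.
 */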
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;
	u32 i;

	if (!drm_dev_enter(drm, &idx))
		return;

	if (hvs->vc4->gen != VC4_GEN_4)
		goto exit;

	/* The LUT memory is laid out with each HVS channel in order,
	 * each of which takes 256 writes for R, 256 for G, then 256
	 * for B.
	 */
	HVS_WRITE(SCALER_GAMADDR,
		  SCALER_GAMADDR_AUTOINC |
		  (vc4_state->assigned_channel * 3 * crtc->gamma_size));

	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);

exit:
	drm_dev_exit(idx);
}
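
/* Addressing example (assumes the usual 256-entry gamma ramp on VC4):
 * with crtc->gamma_size == 256, channel 0's LUT starts at GAMADDR 0,
 * channel 1's at 1 * 3 * 256 = 768, and channel 2's at 1536; the
 * AUTOINC bit then advances the address on every SCALER_GAMDATA write
 * through the 256 R, 256 G, and 256 B entries.
 */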
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc_state *crtc_state = vc4_crtc->base.state;
	struct drm_color_lut *lut = crtc_state->gamma_lut->data;
	u32 length = drm_color_lut_size(crtc_state->gamma_lut);
	u32 i;

	for (i = 0; i < length; i++) {
		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
	}

	vc4_hvs_lut_load(hvs, vc4_crtc);
}
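
/* drm_color_lut_extract() scales the 16-bit-per-component entries that
 * DRM userspace provides down to the 8 bits of precision the VC4 gamma
 * SRAM stores, so the copy above is a straightforward truncation of the
 * user's LUT into the driver's shadow tables before reloading hardware.
 */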
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
	struct drm_device *drm = &hvs->vc4->base;
	u8 field = 0;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return 0;

	switch (fifo) {
	case 0:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT0);
		break;
	case 1:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT1);
		break;
	case 2:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
				      SCALER_DISPSTAT2_FRCNT2);
		break;
	}

	drm_dev_exit(idx);
	return field;
}
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg;
	int ret;

	switch (vc4->gen) {
	case VC4_GEN_4:
		return output;

	case VC4_GEN_5:
		/*
		 * NOTE: We should probably use
		 * drm_dev_enter()/drm_dev_exit() here, but this
		 * function is only used during the DRM device
		 * initialization, so we should be fine.
		 */
		switch (output) {
		case 0:
			return 0;

		case 1:
			return 1;

		case 2:
			reg = HVS_READ(SCALER_DISPECTRL);
			ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
			if (ret == 0)
				return 2;

			return 0;

		case 3:
			reg = HVS_READ(SCALER_DISPCTRL);
			ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
			if (ret == 3)
				return -EPIPE;

			return ret;

		case 4:
			reg = HVS_READ(SCALER_DISPEOLN);
			ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
			if (ret == 3)
				return -EPIPE;

			return ret;

		case 5:
			reg = HVS_READ(SCALER_DISPDITHER);
			ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
			if (ret == 3)
				return -EPIPE;

			return ret;

		default:
			return -EPIPE;
		}

	default:
		return -EPIPE;
	}
}
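
/* Reading the muxes above, as a concrete example: if the DSP2 mux field
 * of SCALER_DISPECTRL reads back 0, output 2 is being fed by FIFO 2,
 * otherwise it falls back to FIFO 0; a value of 3 in the DSP3/DSP4/DSP5
 * mux fields marks that output as disconnected, which is why those
 * paths return -EPIPE.
 */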
static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
				struct drm_display_mode *mode, bool oneshot)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
	unsigned int chan = vc4_crtc_state->assigned_channel;
	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
	u32 dispbkgndx;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return -ENODEV;

	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 * When feeding the transposer, we should operate in oneshot
	 * mode.
	 */
	dispctrl = SCALER_DISPCTRLX_ENABLE;
	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));

	if (vc4->gen == VC4_GEN_4) {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
		dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
	} else {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER5_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER5_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
		dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
	}

	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);

	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;

	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
		  ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) |
		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

	/* Reload the LUT, since the SRAMs would have been disabled if
	 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
	 */
	vc4_hvs_lut_load(hvs, vc4_crtc);

	drm_dev_exit(idx);

	return 0;
}
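
/* The three writes at the top of vc4_hvs_init_channel() (0, RESET, 0)
 * pulse SCALER_DISPCTRLX_RESET so the channel starts from a clean FIFO
 * regardless of what the bootloader or a previous mode left behind;
 * only after that are the width/height and enable bits programmed.
 */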
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
	struct drm_device *drm = &hvs->vc4->base;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
		goto out;

	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Once we leave, the scaler should be disabled and its fifo empty. */
	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);

	WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
				   SCALER_DISPSTATX_MODE) !=
		     SCALER_DISPSTATX_MODE_DISABLED);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
		     SCALER_DISPSTATX_EMPTY);

out:
	drm_dev_exit(idx);
}
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane;
	unsigned long flags;
	const struct drm_plane_state *plane_state;
	u32 dlist_count = 0;
	int ret;

	/* The pixelvalve can only feed one encoder (and encoders are
	 * 1:1 with connectors.)
	 */
	if (hweight32(crtc_state->connector_mask) > 1)
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		u32 plane_dlist_count = vc4_plane_dlist_size(plane_state);

		drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n",
			       crtc->base.id, crtc->name,
			       plane->base.id, plane->name,
			       plane_dlist_count);

		dlist_count += plane_dlist_count;
	}

	dlist_count++; /* Account for SCALER_CTL0_END. */

	drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n",
		       crtc->base.id, crtc->name, dlist_count);
	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
				 dlist_count);
	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	if (ret) {
		drm_err(dev, "Failed to allocate DLIST entry: %d\n", ret);
		return ret;
	}

	return 0;
}
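
/* Sizing example for the allocation above (illustrative numbers): two
 * planes whose dlists are 12 and 9 words respectively yield
 * dlist_count = 12 + 9 + 1 = 22, the final word being the
 * SCALER_CTL0_END terminator that vc4_hvs_atomic_flush() writes after
 * the last plane's entries.
 */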
static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
		  vc4_state->mm.start);

	drm_dev_exit(idx);
}
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	if (crtc->state->event) {
		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);

		if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
			vc4_crtc->event = crtc->state->event;
			crtc->state->event = NULL;
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = vc4_state->mm.start;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}
void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	bool oneshot = vc4_crtc->feeds_txp;

	vc4_hvs_install_dlist(crtc);
	vc4_hvs_update_dlist(crtc);
	vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
	unsigned int chan = vc4_state->assigned_channel;

	vc4_hvs_stop_channel(vc4->hvs, chan);
}
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned int channel = vc4_state->assigned_channel;
	struct drm_plane *plane;
	struct vc4_plane_state *vc4_plane_state;
	bool debug_dump_regs = false;
	bool enable_bg_fill = false;
	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
	u32 __iomem *dlist_next = dlist_start;
	unsigned int zpos = 0;
	bool found = false;
	int idx;

	if (!drm_dev_enter(dev, &idx)) {
		vc4_crtc_send_vblank(crtc);
		return;
	}

	if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
		return;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	/* Copy all the active planes' dlist contents to the hardware dlist. */
	do {
		found = false;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			if (plane->state->normalized_zpos != zpos)
				continue;

			/* Is this the first active plane? */
			if (dlist_next == dlist_start) {
				/* We need to enable background fill when a plane
				 * could be alpha blending from the background, i.e.
				 * where no other plane is underneath. It suffices to
				 * consider the first active plane here since we set
				 * needs_bg_fill such that either the first plane
				 * already needs it or all planes on top blend from
				 * the first or a lower plane.
				 */
				vc4_plane_state = to_vc4_plane_state(plane->state);
				enable_bg_fill = vc4_plane_state->needs_bg_fill;
			}

			dlist_next += vc4_plane_write_dlist(plane, dlist_next);

			found = true;
		}

		zpos++;
	} while (found);

	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);

	if (enable_bg_fill)
		/* This sets a black background color fill, as is the case
		 * with other DRM drivers.
		 */
		HVS_WRITE(SCALER_DISPBKGNDX(channel),
			  HVS_READ(SCALER_DISPBKGNDX(channel)) |
			  SCALER_DISPBKGND_FILL);

	/* Only update DISPLIST if the CRTC was already running and is not
	 * being disabled.
	 * vc4_crtc_enable() takes care of updating the dlist just after
	 * re-enabling VBLANK interrupts and before enabling the engine.
	 * If the CRTC is being disabled, there's no point in updating this
	 * information.
	 */
	if (crtc->state->active && old_state->active) {
		vc4_hvs_install_dlist(crtc);
		vc4_hvs_update_dlist(crtc);
	}

	if (crtc->state->color_mgmt_changed) {
		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));

		if (crtc->state->gamma_lut) {
			vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
			dispbkgndx |= SCALER_DISPBKGND_GAMMA;
		} else {
			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
			 * in hardware, which is the same as a linear lut that
			 * DRM expects us to use in absence of a user lut.
			 */
			dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
		}
		HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
	}

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	drm_dev_exit(idx);
}
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl &= ~((vc4->gen == VC4_GEN_5) ?
		      SCALER5_DISPCTRL_DSPEISLUR(channel) :
		      SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= ((vc4->gen == VC4_GEN_5) ?
		     SCALER5_DISPCTRL_DSPEISLUR(channel) :
		     SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}
static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}
static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;
	u32 dspeislur;

	/*
	 * NOTE: We don't need to protect the register access using
	 * drm_dev_enter() there because the interrupt handler lifetime
	 * is tied to the device itself, and not to the DRM device.
	 *
	 * So when the device will be gone, one of the first thing we
	 * will be doing will be to unregister the interrupt handler,
	 * and then unregister the DRM device. drm_dev_enter() would
	 * thus always succeed if we are here.
	 */

	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		dspeislur = (vc4->gen == VC4_GEN_5) ?
			SCALER5_DISPCTRL_DSPEISLUR(channel) :
			SCALER_DISPCTRL_DSPEISLUR(channel);

		/* Interrupt masking is not always honored, so check it here. */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & dspeislur) {
			vc4_hvs_mask_underrun(hvs, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
				   SCALER_DISPSTAT_IRQMASK(1) |
				   SCALER_DISPSTAT_IRQMASK(2));

	return irqret;
}
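
/* SCALER_DISPSTAT appears to be write-to-clear (vc4_hvs_unmask_underrun()
 * relies on the same behavior when it writes SCALER_DISPSTAT_EUFLOW), so
 * the single write above acks the latched flags for all three channels at
 * once; any underrun this handler chose to act on has already been masked
 * via vc4_hvs_mask_underrun() before this point.
 */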
int vc4_hvs_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;

	if (!vc4->hvs)
		return -ENODEV;

	if (vc4->gen == VC4_GEN_4)
		debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    &vc4->load_tracker_enabled);

	drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);

	drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);

	vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);

	return 0;
}
struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
				void __iomem *regs,
				struct platform_device *pdev)
{
	struct drm_device *drm = &vc4->base;
	struct vc4_hvs *hvs;

	hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return ERR_PTR(-ENOMEM);

	hvs->vc4 = vc4;
	hvs->regs = regs;
	hvs->pdev = pdev;

	spin_lock_init(&hvs->mm_lock);

	/* Set up the HVS display list memory manager. We never
	 * overwrite the setup from the bootloader (just 128b out of
	 * our 16K), since we don't want to scramble the screen when
	 * transitioning from the firmware's boot setup to runtime.
	 */
	hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
	drm_mm_init(&hvs->dlist_mm,
		    HVS_BOOTLOADER_DLIST_END,
		    hvs->dlist_mem_size);

	/* Set up the HVS LBM memory manager. We could have some more
	 * complicated data structure that allowed reuse of LBM areas
	 * between planes when they don't overlap on the screen, but
	 * for now we just allocate globally.
	 */
	if (vc4->gen == VC4_GEN_4)
		/* 48k words of 2x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
	else
		/* 60k words of 4x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);

	vc4->hvs = hvs;

	return hvs;
}
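
/* Arithmetic behind dlist_mem_size, for reference: SCALER_DLIST_SIZE is
 * the dlist RAM size in bytes (the 16K mentioned above), so ">> 2"
 * converts it to 32-bit dwords; subtracting HVS_BOOTLOADER_DLIST_END
 * keeps the words the firmware's boot display list occupies (the "128b"
 * noted above) out of the allocator's reach.
 */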
static int vc4_hvs_hw_init(struct vc4_hvs *hvs)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 dispctrl, reg;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= SCALER_DISPCTRL_ENABLE;
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	reg = HVS_READ(SCALER_DISPECTRL);
	reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
	HVS_WRITE(SCALER_DISPECTRL,
		  reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));

	reg = HVS_READ(SCALER_DISPCTRL);
	reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
	HVS_WRITE(SCALER_DISPCTRL,
		  reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));

	reg = HVS_READ(SCALER_DISPEOLN);
	reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
	HVS_WRITE(SCALER_DISPEOLN,
		  reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));

	reg = HVS_READ(SCALER_DISPDITHER);
	reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
	HVS_WRITE(SCALER_DISPDITHER,
		  reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
		    SCALER_DISPCTRL_DISPEIRQ(1) |
		    SCALER_DISPCTRL_DISPEIRQ(2);

	if (vc4->gen == VC4_GEN_4)
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER_DISPCTRL_SLVWREIRQ |
			      SCALER_DISPCTRL_SLVRDEIRQ |
			      SCALER_DISPCTRL_DSPEIEOF(0) |
			      SCALER_DISPCTRL_DSPEIEOF(1) |
			      SCALER_DISPCTRL_DSPEIEOF(2) |
			      SCALER_DISPCTRL_DSPEIEOLN(0) |
			      SCALER_DISPCTRL_DSPEIEOLN(1) |
			      SCALER_DISPCTRL_DSPEIEOLN(2) |
			      SCALER_DISPCTRL_DSPEISLUR(0) |
			      SCALER_DISPCTRL_DSPEISLUR(1) |
			      SCALER_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);
	else
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER5_DISPCTRL_SLVEIRQ |
			      SCALER5_DISPCTRL_DSPEIEOF(0) |
			      SCALER5_DISPCTRL_DSPEIEOF(1) |
			      SCALER5_DISPCTRL_DSPEIEOF(2) |
			      SCALER5_DISPCTRL_DSPEIEOLN(0) |
			      SCALER5_DISPCTRL_DSPEIEOLN(1) |
			      SCALER5_DISPCTRL_DSPEIEOLN(2) |
			      SCALER5_DISPCTRL_DSPEISLUR(0) |
			      SCALER5_DISPCTRL_DSPEISLUR(1) |
			      SCALER5_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);
	/* Set AXI panic mode.
	 * VC4 panics when < 2 lines in FIFO.
	 * VC5 panics when less than 1 line in the FIFO.
	 */
	dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
		      SCALER_DISPCTRL_PANIC1_MASK |
		      SCALER_DISPCTRL_PANIC2_MASK);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	return 0;
}
static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg, top;

	/*
	 * Recompute Composite Output Buffer (COB) allocations for the
	 * displays
	 */
	switch (vc4->gen) {
	case VC4_GEN_4:
		/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
		 * The bottom 2048 pixels are full 32bpp RGBA (intended for the
		 * TXP composing RGBA to memory), whilst the remainder are only
		 * 24bpp RGB.
		 *
		 * Assign 3 lines to channels 1 & 2, and just over 4 lines to
		 * channel 0.
		 */
#define VC4_COB_SIZE		20736
#define VC4_COB_LINE_WIDTH	2048
#define VC4_COB_NUM_LINES	3
		reg = 0;
		top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);
		reg = top;
		top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);
		reg = top;
		top = VC4_COB_SIZE;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
		break;

	case VC4_GEN_5:
		/* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
		 * The bottom 4096 pixels are full RGBA (intended for the TXP
		 * composing RGBA to memory), whilst the remainder are only
		 * RGB. Addressing is always pixel wide.
		 *
		 * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
		 * lines to channel 0.
		 */
#define VC5_COB_SIZE		44416
#define VC5_COB_LINE_WIDTH	4096
#define VC5_COB_NUM_LINES	3
		reg = 0;
		top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);

		reg = top;
		top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);

		reg = top;
		top = VC5_COB_SIZE;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
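
/* Worked example for the VC4 case above: channel 2 gets COB pixels
 * 0..6143 (3 lines of 2048), channel 1 gets 6144..12287, and channel 0
 * gets the remaining 12288..20735, i.e. just over 4 lines; each
 * SCALER_DISPBASEn register packs the base address in the low half and
 * the inclusive top (top - 1) in bits 31:16.
 */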
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = NULL;
	void __iomem *regs;
	int ret;

	regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	hvs = __vc4_hvs_alloc(vc4, regs, pdev);
	if (IS_ERR(hvs))
		return PTR_ERR(hvs);

	hvs->regset.base = hvs->regs;
	hvs->regset.regs = vc4_hvs_regs;
	hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);

	if (vc4->gen == VC4_GEN_5) {
		struct rpi_firmware *firmware;
		struct device_node *node;
		unsigned int max_rate;

		node = rpi_firmware_find_node();
		if (!node)
			return -EINVAL;

		firmware = rpi_firmware_get(node);
		of_node_put(node);
		if (!firmware)
			return -EPROBE_DEFER;

		hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(hvs->core_clk)) {
			dev_err(&pdev->dev, "Couldn't get core clock\n");
			return PTR_ERR(hvs->core_clk);
		}

		max_rate = rpi_firmware_clk_get_max_rate(firmware,
							 RPI_FIRMWARE_CORE_CLK_ID);
		rpi_firmware_put(firmware);
		if (max_rate >= 550000000)
			hvs->vc5_hdmi_enable_hdmi_20 = true;

		if (max_rate >= 600000000)
			hvs->vc5_hdmi_enable_4096by2160 = true;

		hvs->max_core_rate = max_rate;

		ret = clk_prepare_enable(hvs->core_clk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable the core clock\n");
			return ret;
		}
	}

	if (vc4->gen == VC4_GEN_4)
		hvs->dlist = hvs->regs + SCALER_DLIST_START;
	else
		hvs->dlist = hvs->regs + SCALER5_DLIST_START;

	ret = vc4_hvs_hw_init(hvs);
	if (ret)
		return ret;

	/* Upload filter kernels. We only have the one for now, so we
	 * keep it around for the lifetime of the driver.
	 */
	ret = vc4_hvs_upload_linear_kernel(hvs,
					   &hvs->mitchell_netravali_filter,
					   mitchell_netravali_1_3_1_3_kernel);
	if (ret)
		return ret;

	ret = vc4_hvs_cob_init(hvs);
	if (ret)
		return ret;

	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
	if (ret)
		return ret;

	return 0;
}
static void vc4_hvs_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_mm_node *node, *next;

	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
		drm_mm_remove_node(node);

	drm_mm_takedown(&vc4->hvs->dlist_mm);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&vc4->hvs->lbm_mm);

	clk_disable_unprepare(hvs->core_clk);

	vc4->hvs = NULL;
}
static const struct component_ops vc4_hvs_ops = {
	.bind   = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};
static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static void vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
}
static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2711-hvs" },
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};
struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};