/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in a display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, the list heads a display list chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};
enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by the hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};
/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}
/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}
/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}
/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}
/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
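/*
 * Example usage (illustrative sketch, not part of the driver): build a
 * fragment and release it manually if it never gets added to a display list.
 * The "reg" and "value" identifiers below are placeholders for a real
 * register offset and value.
 *
 *	struct vsp1_dl_body *dlb;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 16);
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_fragment_write(dlb, reg, value);
 *
 *	vsp1_dl_fragment_free(dlb);
 */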
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;
	/* Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;
	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}
	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}
/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;
	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}
	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}
/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}
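/*
 * Example usage (illustrative sketch, not part of the driver): the typical
 * per-frame flow for a display list. Error handling and the actual register
 * writes are placeholders.
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	if (!dl)
 *		return -ENOMEM;
 *
 *	vsp1_dl_list_write(dl, reg, value);
 *	vsp1_dl_list_commit(dl);
 *
 * After the commit the manager owns the list: it rotates it through the
 * pending, queued and active states and releases it from the interrupt
 * handlers, so the caller typically only calls vsp1_dl_list_put() for lists
 * it abandons before committing them.
 */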
/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable with display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}
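/*
 * Example usage (illustrative sketch, not part of the driver): attach a
 * pre-built fragment to a header mode display list. On success the list owns
 * the fragment; on failure the caller still owns it and must free it.
 *
 *	ret = vsp1_dl_list_add_fragment(dl, dlb);
 *	if (ret < 0)
 *		vsp1_dl_fragment_free(dlb);
 */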
/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
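/*
 * Example usage (illustrative sketch, not part of the driver): chain two
 * display lists so the hardware processes them back to back, then commit the
 * head of the chain. Error handling for the get calls is omitted.
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *	int ret;
 *
 *	ret = vsp1_dl_list_add_chain(head, dl);
 *	if (ret < 0)
 *		vsp1_dl_list_put(dl);
 *
 *	vsp1_dl_list_commit(head);
 */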
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies' addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	/*
	 * If this display list's chain is not empty, we are part of a chain
	 * and the next item in the chain is the display list that the
	 * hardware should queue automatically.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;
		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain,
						 &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}
	/* Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending; it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}
	/* Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
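	/*
	 * Each display list entry is a packed (address, data) pair, the same
	 * 8-byte size as a header list descriptor, hence the
	 * sizeof(*dl->header->lists) factor in the body size below.
	 */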
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}
/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
	spin_lock(&dlm->lock);

	/* The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	spin_unlock(&dlm->lock);
}
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;
	/* Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * asynchronously to the frame end interrupt.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;
	/* The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet; we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;
	/* The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}
	/* Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;

		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}
done:
	spin_unlock(&dlm->lock);
}
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/* The DRM pipeline operates with display lists in Continuous Frame
	 * Mode; all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}
/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}
static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
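/*
 * Example usage (illustrative sketch, not part of the driver): create a
 * manager for WPF0 with a small pool of preallocated display lists at probe
 * time, and destroy it on cleanup. The pool size of 4 is a placeholder.
 *
 *	struct vsp1_dl_manager *dlm;
 *
 *	dlm = vsp1_dlm_create(vsp1, 0, 4);
 *	if (!dlm)
 *		return -ENOMEM;
 *
 *	...
 *
 *	vsp1_dlm_destroy(dlm);
 */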
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}