Linux 4.16.11
drivers/media/platform/vsp1/vsp1_dl.c
/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)
struct vsp1_dl_header_list {
        u32 num_bytes;
        u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
        u32 num_lists;
        struct vsp1_dl_header_list lists[8];
        u32 next_header;
        u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
        u32 addr;
        u32 data;
} __attribute__((__packed__));
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
        struct list_head list;
        struct vsp1_device *vsp1;

        struct vsp1_dl_entry *entries;
        dma_addr_t dma;
        size_t size;

        unsigned int num_entries;
};
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
        struct list_head list;
        struct vsp1_dl_manager *dlm;

        struct vsp1_dl_header *header;
        dma_addr_t dma;

        struct vsp1_dl_body body0;
        struct list_head fragments;

        bool has_chain;
        struct list_head chain;
};

enum vsp1_dl_mode {
        VSP1_DL_MODE_HEADER,
        VSP1_DL_MODE_HEADERLESS,
};
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
        unsigned int index;
        enum vsp1_dl_mode mode;
        bool singleshot;
        struct vsp1_device *vsp1;

        spinlock_t lock;
        struct list_head free;
        struct vsp1_dl_list *active;
        struct vsp1_dl_list *queued;
        struct vsp1_dl_list *pending;

        struct work_struct gc_work;
        struct list_head gc_fragments;
};
/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
                             struct vsp1_dl_body *dlb, unsigned int num_entries,
                             size_t extra_size)
{
        size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

        dlb->vsp1 = vsp1;
        dlb->size = size;

        dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
                                    GFP_KERNEL);
        if (!dlb->entries)
                return -ENOMEM;

        return 0;
}
/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
        dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}
/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
                                            unsigned int num_entries)
{
        struct vsp1_dl_body *dlb;
        int ret;

        dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
        if (!dlb)
                return NULL;

        ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
        if (ret < 0) {
                kfree(dlb);
                return NULL;
        }

        return dlb;
}
/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
        if (!dlb)
                return;

        vsp1_dl_body_cleanup(dlb);
        kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
        dlb->entries[dlb->num_entries].addr = reg;
        dlb->entries[dlb->num_entries].data = data;
        dlb->num_entries++;
}
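
/*
 * Usage sketch (illustrative addition, not part of the original file): a
 * typical client allocates a fragment, fills it with register writes and
 * hands it over to a display list, which then owns it. The register offset
 * and value are placeholders and the helper name is made up for this example;
 * it is never called by the driver, hence the __maybe_unused attribute.
 */
static void __maybe_unused vsp1_dl_fragment_usage_example(struct vsp1_device *vsp1,
                                                          struct vsp1_dl_list *dl)
{
        struct vsp1_dl_body *dlb;

        /* Room for up to 16 register writes. */
        dlb = vsp1_dl_fragment_alloc(vsp1, 16);
        if (!dlb)
                return;

        /* 0x1000 and 0x0 are placeholder register offset and value. */
        vsp1_dl_fragment_write(dlb, 0x1000, 0x00000000);

        /*
         * On success the display list takes ownership of the fragment. Only
         * free it manually if it could not be added (headerless mode).
         */
        if (vsp1_dl_list_add_fragment(dl, dlb) < 0)
                vsp1_dl_fragment_free(dlb);
}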
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl;
        size_t header_size;
        int ret;

        dl = kzalloc(sizeof(*dl), GFP_KERNEL);
        if (!dl)
                return NULL;

        INIT_LIST_HEAD(&dl->fragments);
        dl->dlm = dlm;

        /*
         * Initialize the display list body and allocate DMA memory for the
         * body and the optional header. Both are allocated together to avoid
         * memory fragmentation, with the header located right after the body
         * in memory.
         */
        header_size = dlm->mode == VSP1_DL_MODE_HEADER
                    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
                    : 0;

        ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
                                header_size);
        if (ret < 0) {
                kfree(dl);
                return NULL;
        }

        if (dlm->mode == VSP1_DL_MODE_HEADER) {
                size_t header_offset = VSP1_DL_NUM_ENTRIES
                                     * sizeof(*dl->body0.entries);

                dl->header = ((void *)dl->body0.entries) + header_offset;
                dl->dma = dl->body0.dma + header_offset;

                memset(dl->header, 0, sizeof(*dl->header));
                dl->header->lists[0].addr = dl->body0.dma;
        }

        return dl;
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
        vsp1_dl_body_cleanup(&dl->body0);
        list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
        kfree(dl);
}
/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        if (!list_empty(&dlm->free)) {
                dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
                list_del(&dl->list);

                /*
                 * The display list chain must be initialised to ensure every
                 * display list can assert list_empty() if it is not in a
                 * chain.
                 */
                INIT_LIST_HEAD(&dl->chain);
        }

        spin_unlock_irqrestore(&dlm->lock, flags);

        return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_list *dl_child;

        if (!dl)
                return;

        /*
         * Release any linked display-lists which were chained for a single
         * hardware operation.
         */
        if (dl->has_chain) {
                list_for_each_entry(dl_child, &dl->chain, chain)
                        __vsp1_dl_list_put(dl_child);
        }

        dl->has_chain = false;

        /*
         * We can't free fragments here as DMA memory can only be freed in
         * interruptible context. Move all fragments to the display list
         * manager's list of fragments to be freed, they will be
         * garbage-collected by the work queue.
         */
        if (!list_empty(&dl->fragments)) {
                list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
                schedule_work(&dl->dlm->gc_work);
        }

        dl->body0.num_entries = 0;

        list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
        unsigned long flags;

        if (!dl)
                return;

        spin_lock_irqsave(&dl->dlm->lock, flags);
        __vsp1_dl_list_put(dl);
        spin_unlock_irqrestore(&dl->dlm->lock, flags);
}
/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
        vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
                              struct vsp1_dl_body *dlb)
{
        /* Multi-body lists are only available in header mode. */
        if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
                return -EINVAL;

        list_add_tail(&dlb->list, &dl->fragments);
        return 0;
}
/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
                           struct vsp1_dl_list *dl)
{
        /* Chained lists are only available in header mode. */
        if (head->dlm->mode != VSP1_DL_MODE_HEADER)
                return -EINVAL;

        head->has_chain = true;
        list_add_tail(&dl->chain, &head->chain);
        return 0;
}
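
/*
 * Chaining sketch (illustrative addition, not part of the original file):
 * image partitioning builds one display list per partition and chains them
 * behind a head list so the hardware processes them back to back. The helper
 * name and the fixed partition count are made up for this example.
 */
static int __maybe_unused vsp1_dl_chain_usage_example(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *head;
        struct vsp1_dl_list *dl;
        unsigned int i;

        head = vsp1_dl_list_get(dlm);
        if (!head)
                return -ENOMEM;

        /* Assume three extra partitions beyond the head list. */
        for (i = 0; i < 3; ++i) {
                dl = vsp1_dl_list_get(dlm);
                if (!dl || vsp1_dl_list_add_chain(head, dl) < 0) {
                        /* Release the partial chain, including the head. */
                        vsp1_dl_list_put(dl);
                        vsp1_dl_list_put(head);
                        return -ENOMEM;
                }
        }

        /* Committing the head fills the headers and auto-starts the chain. */
        vsp1_dl_list_commit(head);
        return 0;
}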
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_header_list *hdr = dl->header->lists;
        struct vsp1_dl_body *dlb;
        unsigned int num_lists = 0;

        /*
         * Fill the header with the display list bodies' addresses and sizes.
         * The address of the first body has already been filled when the
         * display list was allocated.
         */
        hdr->num_bytes = dl->body0.num_entries
                       * sizeof(*dl->header->lists);

        list_for_each_entry(dlb, &dl->fragments, list) {
                num_lists++;
                hdr++;

                hdr->addr = dlb->dma;
                hdr->num_bytes = dlb->num_entries
                               * sizeof(*dl->header->lists);
        }

        dl->header->num_lists = num_lists;

        if (!list_empty(&dl->chain) && !is_last) {
                /*
                 * If this display list's chain is not empty, we are on a list,
                 * and the next item is the display list that we must queue for
                 * automatic processing by the hardware.
                 */
                struct vsp1_dl_list *next = list_next_entry(dl, chain);

                dl->header->next_header = next->dma;
                dl->header->flags = VSP1_DLH_AUTO_START;
        } else if (!dlm->singleshot) {
                /*
                 * If the display list manager works in continuous mode, the
                 * VSP should loop over the display list continuously until
                 * instructed to do otherwise.
                 */
                dl->header->next_header = dl->dma;
                dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
        } else {
                /*
                 * Otherwise, in mem-to-mem mode, we work in single-shot mode
                 * and the next display list must not be started automatically.
                 */
                dl->header->flags = VSP1_DLH_INT_ENABLE;
        }
}
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
        struct vsp1_device *vsp1 = dlm->vsp1;

        if (!dlm->queued)
                return false;

        /*
         * Check whether the VSP1 has taken the update. In headerless mode the
         * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
         * register, and in header mode by clearing the UPDHDR bit in the CMD
         * register.
         */
        if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
                return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
                          & VI6_DL_BODY_SIZE_UPD);
        else
                return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
                          & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_device *vsp1 = dlm->vsp1;

        if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
                /*
                 * In headerless mode, program the hardware directly with the
                 * display list body address and size and set the UPD bit. The
                 * bit will be cleared by the hardware when the display list
                 * processing starts.
                 */
                vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
                vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
                           (dl->body0.num_entries * sizeof(*dl->header->lists)));
        } else {
                /*
                 * In header mode, program the display list header address. If
                 * the hardware is idle (single-shot mode or first frame in
                 * continuous mode) it will then be started independently. If
                 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
                 * will be updated with the display list address.
                 */
                vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
        }
}
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * If a previous display list has been queued to the hardware but not
         * processed yet, the VSP can start processing it at any time. In that
         * case we can't replace the queued list by the new one, as we could
         * race with the hardware. We thus mark the update as pending, it will
         * be queued up to the hardware by the frame end interrupt handler.
         */
        if (vsp1_dl_list_hw_update_pending(dlm)) {
                __vsp1_dl_list_put(dlm->pending);
                dlm->pending = dl;
                return;
        }

        /*
         * Pass the new display list to the hardware and mark it as queued. It
         * will become active when the hardware starts processing it.
         */
        vsp1_dl_list_hw_enqueue(dl);

        __vsp1_dl_list_put(dlm->queued);
        dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;

        /*
         * When working in single-shot mode, the caller guarantees that the
         * hardware is idle at this point. Just commit the head display list
         * to hardware. Chained lists will be started automatically.
         */
        vsp1_dl_list_hw_enqueue(dl);

        dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
        struct vsp1_dl_manager *dlm = dl->dlm;
        struct vsp1_dl_list *dl_child;
        unsigned long flags;

        if (dlm->mode == VSP1_DL_MODE_HEADER) {
                /* Fill the header for the head and chained display lists. */
                vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

                list_for_each_entry(dl_child, &dl->chain, chain) {
                        bool last = list_is_last(&dl_child->chain, &dl->chain);

                        vsp1_dl_list_fill_header(dl_child, last);
                }
        }

        spin_lock_irqsave(&dlm->lock, flags);

        if (dlm->singleshot)
                vsp1_dl_list_commit_singleshot(dl);
        else
                vsp1_dl_list_commit_continuous(dl);

        spin_unlock_irqrestore(&dlm->lock, flags);
}
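
/*
 * Commit sketch (illustrative addition, not part of the original file): the
 * usual per-frame cycle is to take a free display list, fill it with register
 * writes and commit it; the manager then decides whether it is queued
 * immediately or left pending until the next frame end interrupt. The
 * register offset and value are placeholders and the helper name is made up
 * for this example.
 */
static void __maybe_unused vsp1_dl_commit_usage_example(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl;

        dl = vsp1_dl_list_get(dlm);
        if (!dl)
                return;

        /* Up to VSP1_DL_NUM_ENTRIES registers fit in the main body. */
        vsp1_dl_list_write(dl, 0x1000, 0x00000000);

        /* Ownership passes to the manager; no explicit put on this path. */
        vsp1_dl_list_commit(dl);
}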
/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return true if the previous display list has completed at frame end, or
 * false if it has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns true in
 * header mode as display list processing is then not continuous and races
 * never occur.
 */
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
        bool completed = false;

        spin_lock(&dlm->lock);

        /*
         * The mem-to-mem pipelines work in single-shot mode. No new display
         * list can be queued, we don't have to do anything.
         */
        if (dlm->singleshot) {
                __vsp1_dl_list_put(dlm->active);
                dlm->active = NULL;
                completed = true;
                goto done;
        }

        /*
         * If the commit operation raced with the interrupt and occurred after
         * the frame end event but before interrupt processing, the hardware
         * hasn't taken the update into account yet. We have to skip one frame
         * and retry.
         */
        if (vsp1_dl_list_hw_update_pending(dlm))
                goto done;

        /*
         * The device starts processing the queued display list right after the
         * frame end interrupt. The display list thus becomes active.
         */
        if (dlm->queued) {
                __vsp1_dl_list_put(dlm->active);
                dlm->active = dlm->queued;
                dlm->queued = NULL;
                completed = true;
        }

        /*
         * Now that the VSP has started processing the queued display list, we
         * can queue the pending display list to the hardware if one has been
         * prepared.
         */
        if (dlm->pending) {
                vsp1_dl_list_hw_enqueue(dlm->pending);
                dlm->queued = dlm->pending;
                dlm->pending = NULL;
        }

done:
        spin_unlock(&dlm->lock);

        return completed;
}
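
/*
 * Interrupt handling sketch (illustrative addition, not part of the original
 * file): a device-level frame end handler calls into the display list manager
 * and only finishes the frame when the manager reports that the previous list
 * has completed. The helper name is made up for this example.
 */
static void __maybe_unused vsp1_dlm_frame_end_example(struct vsp1_dl_manager *dlm)
{
        /*
         * A false return value means the commit raced with the frame end and
         * the current frame must be skipped; try again on the next interrupt.
         */
        if (!vsp1_dlm_irq_frame_end(dlm))
                return;

        /* The previous display list has completed; finish the frame here. */
}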
/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
        u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
                 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
                 | VI6_DL_CTRL_DLE;

        /*
         * The DRM pipeline operates with display lists in Continuous Frame
         * Mode, all other pipelines use manual start.
         */
        if (vsp1->drm)
                ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

        vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
        vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        __vsp1_dl_list_put(dlm->active);
        __vsp1_dl_list_put(dlm->queued);
        __vsp1_dl_list_put(dlm->pending);

        spin_unlock_irqrestore(&dlm->lock, flags);

        dlm->active = NULL;
        dlm->queued = NULL;
        dlm->pending = NULL;
}
/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
        unsigned long flags;

        spin_lock_irqsave(&dlm->lock, flags);

        while (!list_empty(&dlm->gc_fragments)) {
                struct vsp1_dl_body *dlb;

                dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
                                       list);
                list_del(&dlb->list);

                spin_unlock_irqrestore(&dlm->lock, flags);
                vsp1_dl_fragment_free(dlb);
                spin_lock_irqsave(&dlm->lock, flags);
        }

        spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
        struct vsp1_dl_manager *dlm =
                container_of(work, struct vsp1_dl_manager, gc_work);

        vsp1_dlm_fragments_free(dlm);
}
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
                                        unsigned int index,
                                        unsigned int prealloc)
{
        struct vsp1_dl_manager *dlm;
        unsigned int i;

        dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
        if (!dlm)
                return NULL;

        dlm->index = index;
        dlm->mode = index == 0 && !vsp1->info->uapi
                  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
        dlm->singleshot = vsp1->info->uapi;
        dlm->vsp1 = vsp1;

        spin_lock_init(&dlm->lock);
        INIT_LIST_HEAD(&dlm->free);
        INIT_LIST_HEAD(&dlm->gc_fragments);
        INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

        for (i = 0; i < prealloc; ++i) {
                struct vsp1_dl_list *dl;

                dl = vsp1_dl_list_alloc(dlm);
                if (!dl)
                        return NULL;

                list_add_tail(&dl->list, &dlm->free);
        }

        return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
        struct vsp1_dl_list *dl, *next;

        if (!dlm)
                return;

        cancel_work_sync(&dlm->gc_work);

        list_for_each_entry_safe(dl, next, &dlm->free, list) {
                list_del(&dl->list);
                vsp1_dl_list_free(dl);
        }

        vsp1_dlm_fragments_free(dlm);
}
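
/*
 * Lifecycle sketch (illustrative addition, not part of the original file): a
 * display list manager is typically created at probe time with a few
 * preallocated lists, reset when the pipeline is reinitialised, and destroyed
 * at remove time. The helper name and the preallocation count are made up for
 * this example.
 */
static int __maybe_unused vsp1_dlm_lifecycle_example(struct vsp1_device *vsp1)
{
        struct vsp1_dl_manager *dlm;

        /* WPF index 0, with four preallocated display lists. */
        dlm = vsp1_dlm_create(vsp1, 0, 4);
        if (!dlm)
                return -ENOMEM;

        /* ... use vsp1_dl_list_get()/vsp1_dl_list_commit() while streaming ... */

        /* Drop any active/queued/pending lists, e.g. when stopping. */
        vsp1_dlm_reset(dlm);

        /* The dlm struct is devm-allocated; destroy frees the lists only. */
        vsp1_dlm_destroy(dlm);
        return 0;
}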