// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
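/*
 * Sizing example (illustrative, assuming 4 KiB device pages and 8-byte
 * pointers): one page table page holds 4096 / 8 = 512 PTEs, and each PTE
 * maps one 4 KiB page, so CT_ADDRS_PER_PAGE comes to 512 * 4096 bytes,
 * i.e. 2 MiB of device-addressable space per page table page.
 */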
/*
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Fail! No sufficient device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	/* Split the free block: carve @size bytes off its front */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}
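/*
 * get_vm_block() walks the ascending, coalesced free list first-fit.
 * Illustration (hypothetical numbers): with a single free block
 * { addr = 0x0, size = 0x10000 }, a request for 0x4000 bytes returns
 * { addr = 0x0, size = 0x4000 } on the used list and shrinks the free
 * block to { addr = 0x4000, size = 0xc000 }.
 */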
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* Merge the freed block into its successor */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	/* Try to merge the block with the preceding free blocks */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}
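/*
 * Coalescing illustration (hypothetical addresses): freeing
 * { addr = 0x4000, size = 0x4000 } between free neighbours
 * { 0x0, 0x4000 } and { 0x8000, 0x8000 } first merges forward into
 * { 0x4000, 0xc000 }, then the predecessor loop folds that into the
 * preceding block, leaving a single free block { 0x0, 0x10000 }.
 */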
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block that is big enough to allocate!\n");
		return NULL;
	}

	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;

		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	block->size = size;
	return block;
}
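/*
 * ct_vm_map() illustration (hypothetical values): a block at device
 * address 0x8000 with CT_PAGE_SHIFT == 12 starts at PTE index
 * 0x8000 >> 12 = 8; a 3-page buffer then fills ptp[8..10] with the
 * physical addresses of the buffer's pages, taken from the substream's
 * scatter-gather buffer.
 */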
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}
/*
 * Return the host physical addr of the @index-th device page table page
 * on success, or ~0UL on failure.
 * The first ~0UL returned marks the end of the page table pages.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
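/*
 * Illustrative caller loop (hypothetical; the actual hardware setup code
 * lives outside this file):
 *
 *	for (i = 0; vm->get_ptp_phys(vm, i) != ~0UL; i++)
 *		program_ptp_entry(i, vm->get_ptp_phys(vm, i));
 *
 * program_ptp_entry() is a made-up name for whatever routine writes the
 * PTP physical addresses into the chip's registers.
 */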
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  &pci->dev,
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages are allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}
	*rvm = vm;

	return 0;
}
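/*
 * Lifecycle sketch (illustrative; the calling identifiers are hypothetical
 * and error handling is omitted):
 *
 *	struct ct_vm *vm;
 *	struct ct_vm_block *blk;
 *
 *	ct_vm_create(&vm, pci);
 *	blk = vm->map(vm, substream, size);	// program PTEs for the buffer
 *	...
 *	vm->unmap(vm, blk);			// give the block back
 *	ct_vm_destroy(vm);
 */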
/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}