/*
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * This file contains the implementation of virtual memory management object
 * for card device.
 */
20 #include <linux/slab.h>
23 #include <sound/pcm.h>
25 #define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
26 #define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
29 * Find or create vm block based on requested @size.
30 * @size must be page aligned.
32 static struct ct_vm_block
*
33 get_vm_block(struct ct_vm
*vm
, unsigned int size
, struct ct_atc
*atc
)
35 struct ct_vm_block
*block
= NULL
, *entry
;
36 struct list_head
*pos
;
38 size
= CT_PAGE_ALIGN(size
);
39 if (size
> vm
->size
) {
40 dev_err(atc
->card
->dev
,
41 "Fail! No sufficient device virtual memory space available!\n");
45 mutex_lock(&vm
->lock
);
46 list_for_each(pos
, &vm
->unused
) {
47 entry
= list_entry(pos
, struct ct_vm_block
, list
);
48 if (entry
->size
>= size
)
49 break; /* found a block that is big enough */
51 if (pos
== &vm
->unused
)
54 if (entry
->size
== size
) {
55 /* Move the vm node from unused list to used list directly */
56 list_move(&entry
->list
, &vm
->used
);
62 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
66 block
->addr
= entry
->addr
;
68 list_add(&block
->list
, &vm
->used
);
74 mutex_unlock(&vm
->lock
);
78 static void put_vm_block(struct ct_vm
*vm
, struct ct_vm_block
*block
)
80 struct ct_vm_block
*entry
, *pre_ent
;
81 struct list_head
*pos
, *pre
;
83 block
->size
= CT_PAGE_ALIGN(block
->size
);
85 mutex_lock(&vm
->lock
);
86 list_del(&block
->list
);
87 vm
->size
+= block
->size
;
89 list_for_each(pos
, &vm
->unused
) {
90 entry
= list_entry(pos
, struct ct_vm_block
, list
);
91 if (entry
->addr
>= (block
->addr
+ block
->size
))
92 break; /* found a position */
94 if (pos
== &vm
->unused
) {
95 list_add_tail(&block
->list
, &vm
->unused
);
98 if ((block
->addr
+ block
->size
) == entry
->addr
) {
99 entry
->addr
= block
->addr
;
100 entry
->size
+= block
->size
;
103 __list_add(&block
->list
, pos
->prev
, pos
);
110 while (pre
!= &vm
->unused
) {
111 entry
= list_entry(pos
, struct ct_vm_block
, list
);
112 pre_ent
= list_entry(pre
, struct ct_vm_block
, list
);
113 if ((pre_ent
->addr
+ pre_ent
->size
) > entry
->addr
)
116 pre_ent
->size
+= entry
->size
;
122 mutex_unlock(&vm
->lock
);
125 /* Map host addr (kmalloced/vmalloced) to device logical addr. */
126 static struct ct_vm_block
*
127 ct_vm_map(struct ct_vm
*vm
, struct snd_pcm_substream
*substream
, int size
)
129 struct ct_vm_block
*block
;
130 unsigned int pte_start
;
133 struct ct_atc
*atc
= snd_pcm_substream_chip(substream
);
135 block
= get_vm_block(vm
, size
, atc
);
137 dev_err(atc
->card
->dev
,
138 "No virtual memory block that is big enough to allocate!\n");
142 ptp
= (unsigned long *)vm
->ptp
[0].area
;
143 pte_start
= (block
->addr
>> CT_PAGE_SHIFT
);
144 pages
= block
->size
>> CT_PAGE_SHIFT
;
145 for (i
= 0; i
< pages
; i
++) {
147 addr
= snd_pcm_sgbuf_get_addr(substream
, i
<< CT_PAGE_SHIFT
);
148 ptp
[pte_start
+ i
] = addr
;
/* Undo a ct_vm_map(): give the device address range back to the pool. */
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	put_vm_block(vm, block);
}
162 * return the host physical addr of the @index-th device
163 * page table page on success, or ~0UL on failure.
164 * The first returned ~0UL indicates the termination.
167 ct_get_ptp_phys(struct ct_vm
*vm
, int index
)
169 return (index
>= CT_PTP_NUM
) ? ~0UL : vm
->ptp
[index
].addr
;
172 int ct_vm_create(struct ct_vm
**rvm
, struct pci_dev
*pci
)
175 struct ct_vm_block
*block
;
180 vm
= kzalloc(sizeof(*vm
), GFP_KERNEL
);
184 mutex_init(&vm
->lock
);
186 /* Allocate page table pages */
187 for (i
= 0; i
< CT_PTP_NUM
; i
++) {
188 err
= snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV
,
189 snd_dma_pci_data(pci
),
190 PAGE_SIZE
, &vm
->ptp
[i
]);
195 /* no page table pages are allocated */
199 vm
->size
= CT_ADDRS_PER_PAGE
* i
;
201 vm
->unmap
= ct_vm_unmap
;
202 vm
->get_ptp_phys
= ct_get_ptp_phys
;
203 INIT_LIST_HEAD(&vm
->unused
);
204 INIT_LIST_HEAD(&vm
->used
);
205 block
= kzalloc(sizeof(*block
), GFP_KERNEL
);
208 block
->size
= vm
->size
;
209 list_add(&block
->list
, &vm
->unused
);
216 /* The caller must ensure no mapping pages are being used
217 * by hardware before calling this function */
218 void ct_vm_destroy(struct ct_vm
*vm
)
221 struct list_head
*pos
;
222 struct ct_vm_block
*entry
;
224 /* free used and unused list nodes */
225 while (!list_empty(&vm
->used
)) {
228 entry
= list_entry(pos
, struct ct_vm_block
, list
);
231 while (!list_empty(&vm
->unused
)) {
232 pos
= vm
->unused
.next
;
234 entry
= list_entry(pos
, struct ct_vm_block
, list
);
238 /* free allocated page table pages */
239 for (i
= 0; i
< CT_PTP_NUM
; i
++)
240 snd_dma_free_pages(&vm
->ptp
[i
]);