/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* the page argument of this macro is an Emu page (4096 bytes), not
 * an aligned kernel page as elsewhere
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
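/*
 * Each PTB entry is a little-endian 32-bit word: the DMA address of an
 * EMUPAGESIZE page shifted left by one, OR'ed with the index of the PTB
 * slot the entry is written into (see __set_ptb_entry() above).
 */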
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill PTB entry(s) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill PTB entry(s) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the silent page address */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
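/*
 * When PAGE_SIZE is larger than EMUPAGESIZE, one aligned kernel page
 * covers UNIT_PAGES consecutive PTB slots, so the helpers above fan a
 * single page address out over UNIT_PAGES entries.
 */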
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
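/*
 * Example: with 4 KiB pages, a block at mem.offset 0x3000 with
 * mem.size 0x2000 spans aligned pages 3..4, i.e. blk->pages == 2.
 */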
/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped
 * block in nextp
 * if not found, return a negative error code.
 */
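/*
 * Strategy note: an exact fit is taken immediately; otherwise the largest
 * hole seen so far is kept as a fallback candidate, so large empty regions
 * are only fragmented when no exact match exists.
 */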
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit */
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block in the proper position of mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block
 * return the size of the resultant empty pages
 *
 * call with memblk_lock held
 */
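/*
 * The returned size is that of the merged hole: the unmapped block plus
 * any free pages adjacent to it.  The caller compares it against the
 * number of pages it needs to map.
 */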
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)
		__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
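/*
 * Note: mapped_order_link_head is kept in mapping order (a block is
 * appended whenever it is mapped or remapped), so the eviction loop
 * below walks the blocks from least to most recently mapped.
 */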
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks */
		/* starting from the oldest block */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses; the page pointers are not stored, so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (! is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
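/*
 * Pages must be reachable through the chip's DMA mask, so each page is
 * first allocated from ZONE_DMA32 (below 4GB); if the resulting page
 * still exceeds the mask, the code falls back to ZONE_DMA (below 16MB),
 * which is always addressable.
 */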
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}
/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (ptr == NULL) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
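/*
 * The pages backing a synth block are not contiguous in kernel address
 * space, so the area is processed chunk by chunk, one page at a time,
 * through offset_ptr().  The same walk is used by the copy_from_user
 * helper below.
 */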
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);