/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* page arguments of these two macros are Emu pages (4096 bytes),
 * not aligned pages as elsewhere
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
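/*
 * Worked example (illustrative values only): an Emu page index of 5
 * backed by the DMA address 0x12345000 yields the PTB entry
 * (0x12345000 << 1) | 5 == 0x2468a005.  The address is shifted left by
 * one bit and the page index is OR'ed into the low bits of the entry.
 */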
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
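/*
 * Example (EMUPAGESIZE is 4096): on a 4 KiB-page kernel, UNIT_PAGES == 1
 * and aligned pages map 1:1 onto Emu pages; on a 16 KiB-page kernel,
 * UNIT_PAGES == 4, so each aligned page covers four PTB entries and
 * MAX_ALIGN_PAGES is a quarter of MAXPAGES.
 */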
#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* do not increment the address -- all entries point at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
/*
 * search an empty region on the PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped block
 * in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit -- map right here */
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
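/*
 * Illustration (hypothetical layout): with blocks mapped at pages 0-2
 * and 8-9, a request for npages == 5 finds the hole at pages 3-7, an
 * exact fit, and returns page 3 with *nextp pointing at the second
 * block.  When no exact fit exists, the largest hole -- possibly the
 * tail beyond the last mapped block -- is used instead.
 */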
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block in the proper position of the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block
 * return the size of the resulting empty pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
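/*
 * Note on the return value (same hypothetical layout as above): removing
 * a block mapped at pages 3-7, with a neighbour occupying pages 0-2
 * before it and one starting at page 8 after it, yields
 * end_page - start_page == 8 - 3 == 5.  The count covers the whole gap
 * between the neighbours, not just the block's own pages.
 */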
/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block on the PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
EXPORT_SYMBOL(snd_emu10k1_memblk_map);
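/*
 * The eviction loop above is effectively an LRU policy:
 * mapped_order_link_head is kept in mapping order (oldest first), and a
 * re-mapped block is moved to the tail, so the least recently mapped
 * unlocked blocks are unmapped first until enough contiguous pages open up.
 */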
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses, but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (! is_valid_page(emu, addr)) {
			dev_err(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
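/*
 * Typical call path (a sketch; the exact hook names depend on the PCM
 * code that consumes this API): the PCM hw_params callback calls
 * snd_emu10k1_alloc_pages() once the DMA buffer is committed, and the
 * matching hw_free callback releases the block with
 * snd_emu10k1_free_pages().
 */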
/*
 * release the DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
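/*
 * Sketch of a typical caller (illustrative; sample_bytes and user_buf
 * are hypothetical names, error handling abbreviated):
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (blk == NULL)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, user_buf,
 *					     sample_bytes) < 0) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */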
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check the new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
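/*
 * Example (hypothetical offsets): synth blocks are not page-aligned, so a
 * block spanning aligned pages 4-7 whose predecessor already ends inside
 * page 4 only needs fresh pages 5-7; the shared boundary page keeps the
 * neighbour's existing allocation.
 */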
/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from the <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from the <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}
/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (ptr == NULL) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);