/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* page arguments of these two macros are Emu page (4096 bytes), not like
 * aligned pages in others
 */
#define __set_ptb_entry(emu,page,addr) \
        (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
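
/*
 * Note: each PTB entry is a 32-bit little-endian word combining the DMA
 * address of one EMUPAGESIZE page, shifted according to the chip's
 * addressing mode, with the index of the entry itself in the low bits
 * (presumably used by the hardware as a consistency check).
 */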

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else /* PAGE_SIZE != 4096 */
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                addr += EMUPAGESIZE;
        }
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                /* do not increment ptr */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
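
/*
 * When the kernel page size is larger than EMUPAGESIZE, one aligned kernel
 * page spans UNIT_PAGES consecutive PTB entries, hence the loops above.
 * For example, with PAGE_SIZE == 8192, UNIT_PAGES == 2, so aligned page N
 * fills PTB entries 2N and 2N+1 with addr and addr + EMUPAGESIZE.
 */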

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;

        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}
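
/*
 * A mapped block sits on two lists: mapped_link keeps the blocks sorted by
 * their position in the PTB (so that empty holes can be found by walking it),
 * while mapped_order_link keeps them in mapping order, i.e. the least
 * recently mapped block first, which is the eviction order used by
 * snd_emu10k1_memblk_map() below.
 */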

/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped
 * block in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 0, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        /* insert this block in the proper position of mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this as a newest block in order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}

/*
 * unmap the block
 * return the size of resultant empty pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 0;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}

/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr,
                                                                 psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}

/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                dev_err(emu->card->dev,
                        "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                        emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE - 1)) {
                dev_err(emu->card->dev, "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, tries to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest block
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
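
/*
 * The PCM buffer is a scatter-gather buffer: each aligned page of it is
 * looked up with snd_pcm_sgbuf_get_addr() and entered into the PTB, and
 * pages past the end of the buffer point at the silent page.  The block is
 * map-locked so that the eviction logic above never unmaps an active PCM
 * buffer.
 */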

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses but pointers are not stored so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (! is_valid_page(emu, addr)) {
                        dev_err(emu->card->dev,
                                "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
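
/*
 * Synth blocks are allocated with byte granularity, so the first or last
 * aligned page of a block may be shared with its neighbour in the block
 * list.  get_single_page_range() below narrows the range to the pages owned
 * exclusively by this block, so that shared pages are neither allocated
 * twice nor freed while still in use.
 */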

/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;

        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;  /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        int page;

        for (page = first_page; page <= last_page; page++) {
                free_page((unsigned long)emu->page_ptr_table[page]);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}
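
/*
 * In synth_alloc_pages() below, pages are taken from the <4GB zone first;
 * if a page falls outside the card's dma_mask (checked via its PFN), it is
 * released and the allocation retried from the <16MB GFP_DMA zone as a
 * last resort.
 */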

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                /* first try to allocate from <4GB zone */
                struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
                                            __GFP_NOWARN);
                if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
                        if (p)
                                __free_page(p);
                        /* try to allocate from <16MB zone */
                        p = alloc_page(GFP_ATOMIC | GFP_DMA |
                                       __GFP_NORETRY | /* no OOM-killer */
                                       __GFP_NOWARN);
                }
                if (!p) {
                        __synth_free_pages(emu, first_page, page - 1);
                        return -ENOMEM;
                }
                emu->page_addr_table[page] = page_to_phys(p);
                emu->page_ptr_table[page] = page_address(p);
        }
        return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;

        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (ptr == NULL) {
                dev_err(emu->card->dev,
                        "access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}
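
/*
 * The two access helpers below walk a synth block page by page, since its
 * backing pages are not contiguous in kernel memory.  For example, with
 * 4kB pages, clearing 6000 bytes starting at in-page offset 1000 touches
 * two pages: 3096 bytes up to the first page boundary, then the remaining
 * 2904 bytes.
 */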

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);