/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* page arguments of these two macros are Emu page (4096 bytes), not like
 * aligned pages in others
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
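/*
 * Worked example of the encoding above (illustrative values only): with
 * address_mode == 0, mapping DMA address 0x12345000 into PTB slot 5
 * stores cpu_to_le32((0x12345000 << 0) | 5) == 0x12345005 -- the slot
 * index rides in the low bits, the page-aligned address in the rest.
 */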
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
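/*
 * E.g. with PAGE_SIZE == 4096 (PAGE_SHIFT == 12),
 * get_aligned_page(0x3000) yields page 3 and aligned_page_offset(3)
 * yields 0x3000 again; the two macros are inverses on page-aligned
 * offsets.
 */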
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry(s) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry(s) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment ptr */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */
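/*
 * Note: with EMUPAGESIZE at 4 KiB, an architecture using 16 KiB kernel
 * pages gives UNIT_PAGES == 4, so the helpers above fan one aligned
 * page out into four consecutive PTB entries.
 */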
/*
 * prototypes
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
/*
 * search empty region on PTB with the given size
 *
 * if an empty region is found, return the page and store the next mapped block
 * in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
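/*
 * Example of the policy above: with blocks mapped at pages [1..4] and
 * [10..12] and npages == 6, the hole [5..9] is only 5 pages, so the
 * search falls through to the tail region after page 12 (or, failing
 * that, returns the largest hole recorded in `candidate`).
 */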
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block in the proper position of mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as a newest block in order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block
 * return the size of resultant empty pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 1;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
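/*
 * The returned value is the size of the hole that unmapping opened up,
 * including any free space that already bordered the block on either
 * side; snd_emu10k1_memblk_map() compares it against blk->pages when
 * evicting old mappings.
 */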
/*
 * search empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
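/*
 * This is a first-fit scan over the address-ordered block list: the new
 * block lands in the first gap of at least `psize` aligned pages, or
 * after the last block if the tail still fits below max_cache_pages.
 */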
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, tries to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* ok, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
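/*
 * The eviction loop above walks mapped_order_link_head oldest-first, so
 * the PTB behaves like an LRU cache of mapped regions; blocks with
 * map_locked set (e.g. PCM buffers) are never evicted.
 */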
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
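	/*
	 * The padding above applies when the whole buffer is one period:
	 * presumably it leaves room for the voice to run slightly past
	 * the nominal buffer end while the interrupt is delayed by
	 * delay_pcm_irq. Pages beyond dma_bytes are backed by the silent
	 * page in the loop below.
	 */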
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for
 * why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also
 * needs changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   snd_dma_pci_data(emu->pci), size, dmab);
}
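/*
 * Caller sketch (hypothetical values): requesting exactly PAGE_SIZE
 * bytes with iommu_workaround set yields a two-page allocation, since
 * size_real (4096) < size + 1024; without the workaround the request is
 * passed through unchanged.
 */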
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;	/* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--;	/* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
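/*
 * Neighbouring blocks may share their boundary kernel page; the range
 * returned above is trimmed so that a shared first/last page is neither
 * allocated twice nor freed while the neighbour still uses it.
 */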
/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = snd_dma_pci_data(emu->pci);

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}
/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
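/*
 * E.g. offset_ptr(emu, 7, 0x1234) returns page_ptr_table[7] + 0x234 for
 * 4 KiB pages -- only the in-page part of `offset` is used, so callers
 * can pass the running byte offset without masking it first.
 */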
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
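/*
 * The same page-walking pattern recurs in
 * snd_emu10k1_synth_copy_from_user() below: clamp each chunk to the end
 * of the current kernel page, then advance page by page until the
 * requested range is covered.
 */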
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);