sound/core/sgbuf.c
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>

/* table entries are aligned to multiples of 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	if (dmab->area)
		vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* not a chunk head, freed with its head entry */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		/* the head entry carries the chunk size (in pages) in the
		 * low bits of addr; convert it back to bytes
		 */
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			/* allocation stopped part-way; report what we got */
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head: chunk size in low bits */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}

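/*
 * Illustrative sketch, not part of the original file: how a caller might
 * pair snd_malloc_sgbuf_pages() with snd_free_sgbuf_pages().  The function
 * name and the 64 KB size below are made up for the example; real drivers
 * normally reach this code through snd_dma_alloc_pages() with the
 * SNDRV_DMA_TYPE_DEV_SG buffer type rather than calling it directly.
 */
static int __maybe_unused sgbuf_example_alloc(struct device *dev,
					      struct snd_dma_buffer *dmab)
{
	size_t got = 0;

	/* ask for 64 KB; res_size reports how much was actually obtained */
	if (!snd_malloc_sgbuf_pages(dev, 64 * 1024, dmab, &got))
		return -ENOMEM;

	/* ... use dmab->area, a vmap'ed, virtually continuous view ... */

	snd_free_sgbuf_pages(dmab);	/* releases pages, tables and mapping */
	return 0;
}
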
/*
 * compute the max chunk size of physically continuous pages on the sg-buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
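
/*
 * Illustrative sketch, not part of the original file: walking a buffer in
 * physically continuous chunks, roughly what a controller driver does when
 * building a hardware descriptor list.  sgbuf_example_walk() and the
 * commented-out program_descriptor() call are placeholders, not real APIs.
 */
static void __maybe_unused sgbuf_example_walk(struct snd_dma_buffer *dmab,
					      unsigned int bytes)
{
	unsigned int ofs = 0;

	while (ofs < bytes) {
		/* largest run starting at ofs that is physically contiguous */
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
							      bytes - ofs);

		/* program_descriptor(dma_address_at(ofs), chunk); */
		ofs += chunk;
	}
}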