/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
22 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/export.h>
26 #include <sound/memalloc.h>
/* table entries are align to 32 */
#define SGBUF_TBL_ALIGN		32
/* round a page count up to the next multiple of SGBUF_TBL_ALIGN */
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
33 int snd_free_sgbuf_pages(struct snd_dma_buffer
*dmab
)
35 struct snd_sg_buf
*sgbuf
= dmab
->private_data
;
36 struct snd_dma_buffer tmpb
;
46 tmpb
.dev
.type
= SNDRV_DMA_TYPE_DEV
;
47 tmpb
.dev
.dev
= sgbuf
->dev
;
48 for (i
= 0; i
< sgbuf
->pages
; i
++) {
49 if (!(sgbuf
->table
[i
].addr
& ~PAGE_MASK
))
50 continue; /* continuous pages */
51 tmpb
.area
= sgbuf
->table
[i
].buf
;
52 tmpb
.addr
= sgbuf
->table
[i
].addr
& PAGE_MASK
;
53 tmpb
.bytes
= (sgbuf
->table
[i
].addr
& ~PAGE_MASK
) << PAGE_SHIFT
;
54 snd_dma_free_pages(&tmpb
);
58 kfree(sgbuf
->page_table
);
60 dmab
->private_data
= NULL
;
65 #define MAX_ALLOC_PAGES 32
67 void *snd_malloc_sgbuf_pages(struct device
*device
,
68 size_t size
, struct snd_dma_buffer
*dmab
,
71 struct snd_sg_buf
*sgbuf
;
72 unsigned int i
, pages
, chunk
, maxpages
;
73 struct snd_dma_buffer tmpb
;
74 struct snd_sg_page
*table
;
75 struct page
**pgtable
;
79 dmab
->private_data
= sgbuf
= kzalloc(sizeof(*sgbuf
), GFP_KERNEL
);
83 pages
= snd_sgbuf_aligned_pages(size
);
84 sgbuf
->tblsize
= sgbuf_align_table(pages
);
85 table
= kcalloc(sgbuf
->tblsize
, sizeof(*table
), GFP_KERNEL
);
89 pgtable
= kcalloc(sgbuf
->tblsize
, sizeof(*pgtable
), GFP_KERNEL
);
92 sgbuf
->page_table
= pgtable
;
95 maxpages
= MAX_ALLOC_PAGES
;
98 /* don't be too eager to take a huge chunk */
101 chunk
<<= PAGE_SHIFT
;
102 if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV
, device
,
108 size
= sgbuf
->pages
* PAGE_SIZE
;
111 chunk
= tmpb
.bytes
>> PAGE_SHIFT
;
112 for (i
= 0; i
< chunk
; i
++) {
113 table
->buf
= tmpb
.area
;
114 table
->addr
= tmpb
.addr
;
116 table
->addr
|= chunk
; /* mark head */
118 *pgtable
++ = virt_to_page(tmpb
.area
);
119 tmpb
.area
+= PAGE_SIZE
;
120 tmpb
.addr
+= PAGE_SIZE
;
122 sgbuf
->pages
+= chunk
;
124 if (chunk
< maxpages
)
129 dmab
->area
= vmap(sgbuf
->page_table
, sgbuf
->pages
, VM_MAP
, PAGE_KERNEL
);
133 *res_size
= sgbuf
->size
;
137 snd_free_sgbuf_pages(dmab
); /* free the table */
142 * compute the max chunk size with continuous pages on sg-buffer
144 unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer
*dmab
,
145 unsigned int ofs
, unsigned int size
)
147 struct snd_sg_buf
*sg
= dmab
->private_data
;
148 unsigned int start
, end
, pg
;
150 start
= ofs
>> PAGE_SHIFT
;
151 end
= (ofs
+ size
- 1) >> PAGE_SHIFT
;
152 /* check page continuity */
153 pg
= sg
->table
[start
].addr
>> PAGE_SHIFT
;
159 if ((sg
->table
[start
].addr
>> PAGE_SHIFT
) != pg
)
160 return (start
<< PAGE_SHIFT
) - ofs
;
162 /* ok, all on continuous pages */
165 EXPORT_SYMBOL(snd_sgbuf_get_chunk_size
);