/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <sound/memalloc.h>

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
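
/*
 * Each allocated chunk is recorded page by page in the sgbuf table.
 * The first page of a chunk ("head") has the chunk length in pages
 * OR'ed into the low bits of its address; the remaining entries keep a
 * page-aligned address.  snd_free_sgbuf_pages() below relies on this to
 * tell chunk heads from continuation pages.
 */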

int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	/* release each chunk; only head entries carry a non-zero offset */
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32
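
/*
 * Allocate an sg-buffer: the backing memory is taken as a series of
 * physically contiguous chunks of at most MAX_ALLOC_PAGES pages each,
 * which are then vmap()'ed into a single virtually contiguous area.
 * When a chunk comes back smaller than requested, maxpages shrinks so
 * that later requests are not too eager; if allocation stops early and
 * the caller passed res_size, the buffer is truncated to what was
 * actually obtained.
 */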
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}

/*
 * compute the max chunk size with continuous pages on sg-buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
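
/*
 * Illustration only: a sketch of how a caller could walk an sg-backed
 * buffer in physically contiguous runs, e.g. to fill a DMA descriptor
 * list.  It assumes the snd_sgbuf_get_addr() helper from
 * <sound/memalloc.h>; the function name and the descriptor step are
 * hypothetical and not part of this file.
 */
#if 0
static void example_walk_sg_chunks(struct snd_dma_buffer *dmab,
				   unsigned int ofs, unsigned int bytes)
{
	while (bytes > 0) {
		/* largest physically contiguous run starting at ofs */
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes);
		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);

		/* program one descriptor covering [addr, addr + chunk) here */
		ofs += chunk;
		bytes -= chunk;
	}
}
#endif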