// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
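
/*
 * Layout note (as implemented below): each chunk covers a run of
 * physically continuous pages.  The chunk's page count is OR'ed into
 * the low bits of the head entry's addr; the address itself is
 * page-aligned, so those bits are otherwise zero.  Entries for the
 * remaining pages of a chunk keep zero low bits, which is how the free
 * path distinguishes chunk heads from continuation pages.
 */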

int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	/* drop the vmap'ed kernel mapping before freeing the pages */
	vunmap(dmab->area);
	dmab->area = NULL;

	/* hand each chunk back to snd_dma_free_pages() via a temporary
	 * descriptor matching the type it was allocated with
	 */
	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32
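
/*
 * Note on the cap (rationale inferred, not documented in the source):
 * the per-chunk page count must fit in the low bits of a page-aligned
 * address for the head marking used below, and modest chunk sizes are
 * presumably gentler on the page allocator under fragmentation.
 */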

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		/* map the buffer as non-cached for the UC variant */
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;
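
	/*
	 * Allocation strategy, as implemented below: grab the buffer in
	 * chunks of continuous pages, at most maxpages at a time.  When
	 * the allocator returns less than requested, lower maxpages so
	 * later requests don't retry a size that just fell short.  If
	 * allocation stops early and the caller passed res_size, keep the
	 * partially allocated buffer and report its actual size there.
	 */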
	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			/* the caller accepts a shorter buffer */
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}
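
/*
 * For illustration (call sites live outside this file): in this kernel
 * generation, snd_dma_alloc_pages() in sound/core/memalloc.c dispatches
 * SNDRV_DMA_TYPE_DEV_SG / SNDRV_DMA_TYPE_DEV_UC_SG requests to
 * snd_malloc_sgbuf_pages(), and snd_dma_free_pages() reaches
 * snd_free_sgbuf_pages(); drivers normally use that API rather than
 * calling these helpers directly.
 */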

/*
 * compute the max chunk size with continuous pages on sg-buffer
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	if (!sg)
		return size; /* continuous buffer: the whole range qualifies */

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
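
/*
 * Worked example for snd_sgbuf_get_chunk_size() (hypothetical numbers,
 * 4 KiB pages): suppose pages 0-3 of the buffer came from one chunk and
 * pages 4-7 from another, so page 4 starts at an unrelated physical
 * address.  A query with ofs = 0x3000 and size = 0x2000 crosses the
 * chunk boundary at page 4 and returns (4 << PAGE_SHIFT) - 0x3000 =
 * 0x1000: only the first page of the range is continuous.  A query that
 * stays inside one chunk returns size unchanged.
 */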