// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

#include <asm/page.h>
#include <linux/unaligned.h>

#include <uapi/linux/misc/bcm_vk.h>

#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"

/*
 * Valkyrie has a hardware limitation of 16M transfer size.
 * So limit the SGL chunks to 16M.
 */
#define BCM_VK_MAX_SGL_CHUNK SZ_16M
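
/*
 * Layout of the coherent sglist buffer built by bcm_vk_dma_alloc(), in
 * u32 words:
 *   [SGLIST_NUM_SG]       number of SG entries that follow
 *   [SGLIST_TOTALSIZE]    total transfer size in bytes
 *   [SGLIST_VKDATA_START] first struct _vk_data entry (address + size)
 */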

static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int direction,
			    struct _vk_data *vkdata);
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);

/* Uncomment to dump SGLIST */
/* #define BCM_VK_DUMP_SGLIST */
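
/*
 * Pin the user buffer described by @vkdata, map each page for DMA, and
 * build a coalesced SG list in a coherent buffer.  On success, @vkdata is
 * rewritten in place to point at the SG list (address = DMA handle,
 * size = SG list length) so it can be handed to the card.
 */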
static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int direction,
			    struct _vk_data *vkdata)
{
	dma_addr_t addr, sg_addr;
	int err;
	int i;
	int offset;
	u32 size;
	u32 remaining_size;
	u32 transfer_size;
	u64 data;
	unsigned long first, last;
	struct _vk_data *sgdata;

	/* Get 64-bit user address */
	data = get_unaligned(&vkdata->address);

	/* offset into first page */
	offset = offset_in_page(data);

	/* Calculate number of pages */
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->nr_pages = last - first + 1;
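	/*
	 * Example: with 4K pages, data = 0x100c and size = 0x2000 span
	 * pages 0x1 through 0x3, so nr_pages = 3 even though the buffer
	 * is only two pages of data, because it is not page-aligned.
	 */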

	/* Allocate DMA pages */
	dma->pages = kmalloc_array(dma->nr_pages,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!dma->pages)
		return -ENOMEM;

	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
		data, vkdata->size, dma->nr_pages);

	dma->direction = direction;

	/* Get user pages into memory */
	err = get_user_pages_fast(data & PAGE_MASK,
				  dma->nr_pages,
				  direction == DMA_FROM_DEVICE,
				  dma->pages);
	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
			err, dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
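	/*
	 * The pages are now pinned; the references are dropped with
	 * put_page() in bcm_vk_dma_free() after the transfer completes.
	 */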

	/* Max size of sg list is 1 per mapped page + fields at start */
	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
		     (sizeof(u32) * SGLIST_VKDATA_START);

	/* Allocate sglist */
	dma->sglist = dma_alloc_coherent(dev,
					 dma->sglen,
					 &dma->handle,
					 GFP_KERNEL);
	if (!dma->sglist)
		return -ENOMEM;

	dma->sglist[SGLIST_NUM_SG] = 0;
	dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
	remaining_size = vkdata->size;
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];

	/* Map all pages into DMA */
	size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
	remaining_size -= size;
	sg_addr = dma_map_page(dev,
			       dma->pages[0],
			       offset,
			       size,
			       dma->direction);
	transfer_size = size;
	if (unlikely(dma_mapping_error(dev, sg_addr))) {
		__free_page(dma->pages[0]);
		return -EIO;
	}
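
	/*
	 * The first page was mapped at 'offset'; each remaining page is
	 * mapped from offset 0, for a full page or whatever is left.
	 */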
	for (i = 1; i < dma->nr_pages; i++) {
		size = min_t(size_t, PAGE_SIZE, remaining_size);
		remaining_size -= size;
		addr = dma_map_page(dev,
				    dma->pages[i],
				    0,
				    size,
				    dma->direction);
		if (unlikely(dma_mapping_error(dev, addr))) {
			__free_page(dma->pages[i]);
			return -EIO;
		}

		/*
		 * Compress SG list entry when pages are contiguous
		 * and the transfer size is less than or equal to
		 * BCM_VK_MAX_SGL_CHUNK
		 */
		if ((addr == (sg_addr + transfer_size)) &&
		    ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
			/* pages are contiguous, add to same sg entry */
			transfer_size += size;
		} else {
			/* pages are not contiguous, write sg entry */
			sgdata->size = transfer_size;
			put_unaligned(sg_addr, (u64 *)&sgdata->address);
			dma->sglist[SGLIST_NUM_SG]++;

			/* start new sg entry */
			sgdata++;
			sg_addr = addr;
			transfer_size = size;
		}
	}
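
	/*
	 * The loop above only flushes an entry when contiguity breaks, so
	 * the final (sg_addr, transfer_size) run is still pending here.
	 */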
	/* Write last sg list entry */
	sgdata->size = transfer_size;
	put_unaligned(sg_addr, (u64 *)&sgdata->address);
	dma->sglist[SGLIST_NUM_SG]++;

	/* Update pointers and size field to point to sglist */
	put_unaligned((u64)dma->handle, &vkdata->address);
	vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
		       (sizeof(u32) * SGLIST_VKDATA_START);

#ifdef BCM_VK_DUMP_SGLIST
	dev_dbg(dev,
		"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
		(u64)dma->sglist,
		dma->handle,
		dma->sglen,
		vkdata->size);
	for (i = 0; i < vkdata->size / sizeof(u32); i++)
		dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
#endif

	return 0;
}
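
/*
 * bcm_vk_sg_alloc() converts an array of user buffer descriptors into DMA
 * SG lists, one per non-empty entry.  A minimal caller sketch follows;
 * MAX_BUFS and the surrounding flow are illustrative, not part of this
 * driver:
 *
 *	struct bcm_vk_dma dma[MAX_BUFS];
 *	int proc_cnt;
 *
 *	if (bcm_vk_sg_alloc(dev, dma, DMA_TO_DEVICE, vkdata, num))
 *		return -EINVAL;
 *	... hand the rewritten vkdata to the card, wait for completion ...
 *	bcm_vk_sg_free(dev, dma, num, &proc_cnt);
 */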
int bcm_vk_sg_alloc(struct device *dev,
		    struct bcm_vk_dma *dma,
		    int dir,
		    struct _vk_data *vkdata,
		    int num)
{
	int i;
	int rc = -EINVAL;

	/* Convert user addresses to DMA SG List */
	for (i = 0; i < num; i++) {
		if (vkdata[i].size && vkdata[i].address) {
			/*
			 * If both size and address are non-zero
			 * then DMA alloc.
			 */
			rc = bcm_vk_dma_alloc(dev,
					      &dma[i],
					      dir,
					      &vkdata[i]);
		} else if (vkdata[i].size ||
			   vkdata[i].address) {
			/*
			 * If one of size and address is zero
			 * there is a problem.
			 */
			dev_err(dev,
				"Invalid vkdata %x 0x%x 0x%llx\n",
				i, vkdata[i].size, vkdata[i].address);
			rc = -EINVAL;
		} else {
			/*
			 * If size and address are both zero
			 * don't convert, but return success.
			 */
			rc = 0;
		}

		if (rc)
			goto fail_alloc;
	}
	return rc;

fail_alloc:
	while (i > 0) {
		i--;
		if (dma[i].sglist)
			bcm_vk_dma_free(dev, &dma[i]);
	}
	return rc;
}
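
/*
 * Teardown mirrors bcm_vk_dma_alloc(): unmap every SG entry, free the
 * coherent sglist buffer, drop the page references taken by
 * get_user_pages_fast(), then free the page-pointer array.
 */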
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
	dma_addr_t addr;
	int i;
	int num_sg;
	u32 size;
	struct _vk_data *vkdata;

	dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);

	/* Unmap all pages in the sglist */
	num_sg = dma->sglist[SGLIST_NUM_SG];
	vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < num_sg; i++) {
		size = vkdata[i].size;
		addr = get_unaligned(&vkdata[i].address);

		dma_unmap_page(dev, addr, size, dma->direction);
	}

	/* Free allocated sglist */
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);

	/* Release lock on all pages */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);

	/* Free allocated dma pages */
	kfree(dma->pages);
	dma->sglist = NULL;

	return 0;
}
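
/*
 * Free every entry in @dma that still has an sglist and report via
 * @proc_cnt how many entries were actually processed.
 */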
int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
		   int *proc_cnt)
{
	int i;

	*proc_cnt = 0;
	/* Unmap and free all pages and sglists */
	for (i = 0; i < num; i++) {
		if (dma[i].sglist) {
			bcm_vk_dma_free(dev, &dma[i]);
			*proc_cnt += 1;
		}
	}

	return 0;
}
]);