// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-dma.h"
#include "ipu6-mmu.h"

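/*
 * Bookkeeping for one ipu6_dma_alloc() allocation: the backing page
 * array, the IPU6 IOVA handed back to the caller, the vmap()ed kernel
 * address and the allocation size.  Entries are linked on the owning
 * MMU's vma_list.
 */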
struct vm_info {
	struct list_head list;
	struct page **pages;
	dma_addr_t ipu6_iova;
	void *vaddr;
	unsigned long size;
};

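/*
 * Find the vm_info covering @iova on @mmu's vma_list, or NULL if the
 * address does not fall inside any tracked allocation.
 */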
static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
{
	struct vm_info *info, *save;

	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
		if (iova >= info->ipu6_iova &&
		    iova < (info->ipu6_iova + info->size))
			return info;
	}

	return NULL;
}

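/* Zero a freshly allocated chunk and flush it out of the CPU caches. */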
static void __clear_buffer(struct page *page, size_t size, unsigned long attrs)
{
	void *ptr;

	if (!page)
		return;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		clflush_cache_range(ptr, size);
}

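/*
 * Allocate the backing pages for @size bytes as an array of 0-order
 * pages: try the largest order that still fits the remaining count,
 * fall back to smaller orders, then split the higher-order allocation
 * so every entry in the returned array is an individual page.
 */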
static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs)
{
	int count = PHYS_PFN(size);
	int array_size = count * sizeof(struct page *);
	struct page **pages;
	int i = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;

error:
	while (i--)
		__free_pages(pages[i], 0);
	kvfree(pages);

	return NULL;
}

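/* Release a page array built by __alloc_buffer(). */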
static void __free_buffer(struct page **pages, size_t size, unsigned long attrs)
{
	int count = PHYS_PFN(size);
	unsigned int i;

	for (i = 0; i < count && pages[i]; i++) {
		__clear_buffer(pages[i], PAGE_SIZE, attrs);
		__free_pages(pages[i], 0);
	}

	kvfree(pages);
}

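/*
 * Flush CPU caches for a sub-range of a buffer that was allocated with
 * ipu6_dma_alloc(); @dma_handle is the IPU6 IOVA returned by it.
 */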
void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle,
			  size_t size)
{
	void *vaddr;
	u32 offset;
	struct vm_info *info;
	struct ipu6_mmu *mmu = sys->mmu;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	offset = dma_handle - info->ipu6_iova;
	if (WARN_ON(size > (info->size - offset)))
		return;

	vaddr = info->vaddr + offset;
	clflush_cache_range(vaddr, size);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, INTEL_IPU6);

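/* Flush CPU caches for every entry of a mapped scatterlist. */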
void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
		      int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		clflush_cache_range(sg_virt(sg), sg->length);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, INTEL_IPU6);

void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt)
{
	ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, INTEL_IPU6);

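/*
 * Allocate a buffer usable by the IPU6: each backing page is first
 * DMA-mapped to the PCI device and the resulting PCI DMA address is
 * then mapped into the IPU6 MMU at a freshly allocated IOVA.  The
 * returned pointer is a vmap()ed kernel mapping of the pages and
 * *dma_handle holds the IPU6 IOVA of the buffer.
 */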
void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
		     dma_addr_t *dma_handle, gfp_t gfp,
		     unsigned long attrs)
{
	struct device *dev = &sys->auxdev.dev;
	struct pci_dev *pdev = sys->isp->pdev;
	dma_addr_t pci_dma_addr, ipu6_iova;
	struct ipu6_mmu *mmu = sys->mmu;
	struct vm_info *info;
	unsigned long count;
	struct page **pages;
	struct iova *iova;
	unsigned int i;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);
	count = PHYS_PFN(size);

	iova = alloc_iova(&mmu->dmap->iovad, count,
			  PHYS_PFN(dma_get_mask(dev)), 0);
	if (!iova)
		goto out_kfree;

	pages = __alloc_buffer(size, gfp, attrs);
	if (!pages)
		goto out_free_iova;

	dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
		size, iova->pfn_lo, iova->pfn_hi);
	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
		pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL,
						  attrs);
		dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
			&pci_dma_addr);
		if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
			dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
			goto out_unmap;
		}

		ret = ipu6_mmu_map(mmu->dmap->mmu_info,
				   PFN_PHYS(iova->pfn_lo + i), pci_dma_addr,
				   PAGE_SIZE);
		if (ret) {
			dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed",
				i, &pci_dma_addr);
			dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
					     PAGE_SIZE, DMA_BIDIRECTIONAL,
					     attrs);
			goto out_unmap;
		}
	}

	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!info->vaddr)
		goto out_unmap;

	*dma_handle = PFN_PHYS(iova->pfn_lo);

	info->pages = pages;
	info->ipu6_iova = *dma_handle;
	info->size = size;
	list_add(&info->list, &mmu->vma_list);

	return info->vaddr;

out_unmap:
	while (i--) {
		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu6_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);

		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
	}

	__free_buffer(pages, size, attrs);

out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
out_kfree:
	kfree(info);

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, INTEL_IPU6);

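/*
 * Tear down an ipu6_dma_alloc() buffer: unmap every page from both the
 * PCI device and the IPU6 MMU, release the pages, invalidate the MMU
 * TLB and return the IOVA range.
 */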
void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	struct ipu6_mmu *mmu = sys->mmu;
	struct pci_dev *pdev = sys->isp->pdev;
	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
	dma_addr_t pci_dma_addr, ipu6_iova;
	struct vm_info *info;
	struct page **pages;
	unsigned int i;

	if (WARN_ON(!iova))
		return;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	if (WARN_ON(!info->vaddr))
		return;

	if (WARN_ON(!info->pages))
		return;

	list_del(&info->list);

	size = PAGE_ALIGN(size);

	pages = info->pages;

	vunmap(vaddr);

	for (i = 0; i < PHYS_PFN(size); i++) {
		ipu6_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu6_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);
	}

	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	__free_buffer(pages, size, attrs);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);

	kfree(info);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, INTEL_IPU6);

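/*
 * Insert the pages of an ipu6_dma_alloc() buffer into a userspace VMA,
 * page by page; @iova is the IPU6 IOVA identifying the buffer.
 */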
int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma,
		  void *addr, dma_addr_t iova, size_t size,
		  unsigned long attrs)
{
	struct ipu6_mmu *mmu = sys->mmu;
	size_t count = PFN_UP(size);
	struct vm_info *info;
	size_t i;
	int ret;

	info = get_vm_info(mmu, iova);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	if (size > info->size)
		return -EFAULT;

	for (i = 0; i < count; i++) {
		ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
				     info->pages[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

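/*
 * Undo ipu6_dma_map_sg(): restore the PCI DMA address of each entry,
 * unmap the IOVA range from the IPU6 MMU, invalidate the TLB and free
 * the IOVA.
 */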
void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
		       int nents, enum dma_data_direction dir,
		       unsigned long attrs)
{
	struct device *dev = &sys->auxdev.dev;
	struct ipu6_mmu *mmu = sys->mmu;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      PHYS_PFN(sg_dma_address(sglist)));
	struct scatterlist *sg;
	dma_addr_t pci_dma_addr;
	int i;

	if (!nents)
		return;

	if (WARN_ON(!iova))
		return;

	/*
	 * Before the IPU6 MMU unmap, return the PCI DMA address back to
	 * each sg entry; nents is assumed to be no larger than orig_nents
	 * as the smallest granule is one 4 KiB page.
	 */
	dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents);
	for_each_sg(sglist, sg, nents, i) {
		dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i,
			&sg_dma_address(sg), sg_dma_len(sg));
		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     sg_dma_address(sg));
		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
			&pci_dma_addr, i);
		sg_dma_address(sg) = pci_dma_addr;
	}

	dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);
	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	mmu->tlb_invalidate(mmu);
	__free_iova(&mmu->dmap->iovad, iova);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, INTEL_IPU6);

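/*
 * Map an already PCI-mapped scatterlist into the IPU6 MMU: allocate one
 * contiguous IOVA range covering all entries, map each entry's PCI DMA
 * address into it and rewrite sg_dma_address() to the IPU6 IOVA.
 * Returns the number of mapped entries on success.
 */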
int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
		    int nents, enum dma_data_direction dir,
		    unsigned long attrs)
{
	struct device *dev = &sys->auxdev.dev;
	struct ipu6_mmu *mmu = sys->mmu;
	struct scatterlist *sg;
	struct iova *iova;
	size_t npages = 0;
	unsigned long iova_addr;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset) {
			dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
				i, sg->offset);
			return -EFAULT;
		}
	}

	for_each_sg(sglist, sg, nents, i)
		npages += PFN_UP(sg_dma_len(sg));

	dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n",
		nents, npages);

	iova = alloc_iova(&mmu->dmap->iovad, npages,
			  PHYS_PFN(dma_get_mask(dev)), 0);
	if (!iova)
		return 0;

	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
		iova->pfn_hi);

	iova_addr = iova->pfn_lo;
	for_each_sg(sglist, sg, nents, i) {
		phys_addr_t iova_pa;
		int ret;

		iova_pa = PFN_PHYS(iova_addr);
		dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n",
			i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));

		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   sg_dma_address(sg),
				   PAGE_ALIGN(sg_dma_len(sg)));
		if (ret)
			goto out_fail;

		sg_dma_address(sg) = PFN_PHYS(iova_addr);

		iova_addr += PFN_UP(sg_dma_len(sg));
	}

	dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages);

	return nents;

out_fail:
	ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, INTEL_IPU6);

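/* sg_table wrappers around ipu6_dma_map_sg() and ipu6_dma_unmap_sg(). */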
int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
			 enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
	if (nents < 0)
		return nents;

	sgt->nents = nents;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, INTEL_IPU6);

void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
			    enum dma_data_direction dir, unsigned long attrs)
{
	ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, INTEL_IPU6);

/*
 * Create a scatter-list for an already allocated DMA buffer.
 */
int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
			 void *cpu_addr, dma_addr_t handle, size_t size,
			 unsigned long attrs)
{
	struct device *dev = &sys->auxdev.dev;
	struct ipu6_mmu *mmu = sys->mmu;
	struct vm_info *info;
	int n_pages;
	int ret = 0;

	info = get_vm_info(mmu, handle);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (WARN_ON(!info->pages))
		return -ENOMEM;

	n_pages = PFN_UP(size);

	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		dev_warn(dev, "get sgt table failed\n");

	return ret;
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_get_sgtable, INTEL_IPU6);