Linux 4.13.16
[linux/fpc-iii.git] / arch/powerpc/kernel/dma-iommu.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}
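
/* Frees a coherent buffer allocated by dma_iommu_alloc_coherent() and
 * removes its IOMMU mappings.
 */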
static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, device_to_mask(dev), direction, attrs);
}
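
/* Tears down the TCEs created by dma_iommu_map_page() for a single mapping. */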
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
}
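
/* Maps a scatter/gather list through the device's IOMMU table. */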
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                device_to_mask(dev), direction, attrs);
}
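
/* Undoes the mappings created by dma_iommu_map_sg(). */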
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, enum dma_data_direction direction,
                               unsigned long attrs)
{
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (!tbl) {
                dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
                        ", table unavailable\n", mask);
                return 0;
        }

        if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
                dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
                         mask, tbl->it_offset << tbl->it_page_shift);
                return 0;
        } else
                return 1;
}
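
/* The required mask must cover the highest entry the table can reach:
 * take the largest power of two not above (it_offset + it_size), then
 * widen it into an all-ones mask (2 * mask - 1).
 */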
static u64 dma_iommu_get_required_mask(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;

        if (!tbl)
                return 0;

        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
        mask += mask - 1;

        return mask;
}
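
/* A DMA address of IOMMU_MAPPING_ERROR marks a failed mapping. */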
int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == IOMMU_MAPPING_ERROR;
}
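
/* DMA operations for busses backed by the iommu infrastructure; .mmap
 * falls back to the direct implementation.
 */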
struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
        .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
        .mapping_error          = dma_iommu_mapping_error,
};
EXPORT_SYMBOL(dma_iommu_ops);