/*
 * Copyright IBM Corp. 2012
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
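
/*
 * s390_iommu_strict is set via the "s390_iommu=strict" kernel parameter
 * (see s390_iommu_setup() at the end of this file). In strict mode, device
 * TLB entries are flushed as soon as a range is unmapped; in the default
 * lazy mode, freed DMA addresses are not reused before the iommu bitmap
 * wraps around, at which point a single global flush is performed.
 */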
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}
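
/* Allocate a region/segment translation table with all entries invalid. */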
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}
static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}
static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;

		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}
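
/*
 * Walk (and build as needed) the translation tables for dma_addr, starting
 * from the region table origin rto, and return a pointer to the page-table
 * entry for that address, or NULL if a table allocation failed.
 */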
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}
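
/*
 * Establish or invalidate (depending on flags) the translations for
 * [dma_addr, dma_addr + size) to the physical range starting at pa, and
 * refresh the device TLB when required by the hardware or by the flushing
 * policy. If a refresh for a map request fails, the entries just created
 * are invalidated again.
 */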
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);
undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}
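
/* Free a segment table together with all page tables attached to it. */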
void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}
void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}
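
/*
 * Search the iommu bitmap for a free range of 'size' pages, starting the
 * search at 'start' and honouring the device's DMA segment boundary.
 */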
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				boundary_size, 0);
}
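
/*
 * Next-fit allocation of a DMA address range: continue searching at
 * zdev->next_bit and wrap around to the start of the bitmap at most once.
 * With lazy unmap, a wrap-around triggers a global TLB flush before freed
 * addresses are handed out again.
 */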
static unsigned long dma_alloc_iommu(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;
	int wrap = 0;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		wrap = 1;
	}

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}
static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}
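
/*
 * Map a page range for DMA: allocate a DMA address range from the iommu
 * bitmap, install the translations and return the DMA address including
 * the sub-page offset, or DMA_ERROR_CODE on failure.
 */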
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(dev, nr_pages);
	if (iommu_page_index == -1) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		ret = -ERANGE;
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_iommu(dev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_ERROR_CODE;
}
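
/* Invalidate a mapped range and give its DMA addresses back to the bitmap. */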
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(dev, iommu_page_index, npages);
}
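
/*
 * Coherent allocation: grab zeroed pages from the page allocator and map
 * them bidirectionally through the device's translation tables.
 */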
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}
static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);

		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, 0);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, 0);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
				     0);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}
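
/*
 * Set up DMA translation for a PCI function: allocate the root translation
 * table and the iommu bitmap, and register the table with the hardware.
 */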
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}

	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}
int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}
#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
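
/*
 * These ops back the generic DMA API for zPCI devices, so a driver call
 * such as
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up in s390_dma_map_pages() above ('pdev', 'buf' and 'len' being the
 * driver's own PCI device, buffer and length, used here only for
 * illustration). How s390_pci_dma_ops is attached to the device is decided
 * by the surrounding arch code, not by this file.
 */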
static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;

	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);