arch/s390/pci/pci_dma.c
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

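/*
 * Allocate a region/segment table from its kmem cache and mark every
 * entry invalid and protected. GFP_ATOMIC is needed because new table
 * levels may be created while dma_table_lock is held.
 */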
static unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

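/*
 * Return the segment table origin stored in a region table entry. If
 * the entry is still invalid, allocate a fresh segment table, hook it
 * up and validate the entry. Returns NULL on allocation failure.
 */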
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}

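/*
 * Walk the three-level translation table for a DMA address: region
 * table index (rtx) -> segment table index (sx) -> page table index
 * (px). Missing lower-level tables are allocated on demand. Returns a
 * pointer to the page table entry, or NULL on allocation failure.
 */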
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}

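/*
 * Update a single page table entry: either invalidate it, or point it
 * at page_addr and validate it, then apply the protection bit as
 * requested by the caller's flags.
 */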
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
                                 dma_addr_t dma_addr, int flags)
{
        unsigned long *entry;

        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
        if (!entry) {
                WARN_ON_ONCE(1);
                return;
        }

        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(entry);
                return;
        } else {
                set_pt_pfaa(entry, page_addr);
                validate_pt_entry(entry);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(entry);
        else
                entry_clr_protected(entry);
}

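/*
 * Map or unmap a contiguous range of pages in the translation table
 * and, when required, refresh the device's I/O translations via
 * rpcit. The table is protected by dma_table_lock throughout.
 */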
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                            dma_addr_t dma_addr, size_t size, int flags)
{
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        u8 *page_addr = (u8 *) (pa & PAGE_MASK);
        dma_addr_t start_dma_addr = dma_addr;
        unsigned long irq_flags;
        int i, rc = 0;

        if (!nr_pages)
                return -EINVAL;

        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
        if (!zdev->dma_table)
                goto no_refresh;

        for (i = 0; i < nr_pages; i++) {
                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

        /*
         * rpcit is not required to establish new translations when previously
         * invalid translation-table entries are validated, however it is
         * required when altering previously valid entries.
         */
        if (!zdev->tlb_refresh &&
            ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
                /*
                 * TODO: also need to check that the old entry is indeed INVALID
                 * and not only for one page but for the whole range...
                 * -> now we WARN_ON in that case but with lazy unmap that
                 *    needs to be redone!
                 */
                goto no_refresh;

        rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
                                nr_pages * PAGE_SIZE);

no_refresh:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
        return rc;
}

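/*
 * Free a segment table together with all page tables still hooked up
 * to its valid entries.
 */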
static void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
        unsigned long *table;
        int rtx;

        if (!zdev || !zdev->dma_table)
                return;

        table = zdev->dma_table;
        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
        zdev->dma_table = NULL;
}

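/*
 * Allocate a range of IOMMU pages from the device's aperture bitmap
 * using a next-fit strategy: start searching at next_bit and retry
 * once from the beginning of the aperture on failure.
 */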
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
                                       int size)
{
        unsigned long boundary_size = 0x1000000;

        return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
        if (offset == -1)
                offset = __dma_alloc_iommu(zdev, 0, size);

        if (offset != -1) {
                zdev->next_bit = offset + size;
                if (zdev->next_bit >= zdev->iommu_pages)
                        zdev->next_bit = 0;
        }
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
        return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        if (!zdev->iommu_bitmap)
                goto out;
        bitmap_clear(zdev->iommu_bitmap, offset, size);
        if (offset >= zdev->next_bit)
                zdev->next_bit = offset + size;
out:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

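/*
 * Map a page range for streaming DMA: allocate an aperture slot,
 * install the translation entries and return the resulting bus
 * address. Directions where the device only reads (DMA_TO_DEVICE,
 * and the degenerate DMA_NONE) get the protection bit set, which
 * presumably makes the entry read-only for the device.
 */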
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        unsigned long nr_pages, iommu_page_index;
        unsigned long pa = page_to_phys(page) + offset;
        int flags = ZPCI_PTE_VALID;
        dma_addr_t dma_addr;

        /* This rounds up number of pages based on size and offset */
        nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
        iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
        if (iommu_page_index == -1)
                goto out_err;

        /* Use rounded up size */
        size = nr_pages * PAGE_SIZE;

        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
        if (dma_addr + size > zdev->end_dma)
                goto out_free;

        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
                atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
                return dma_addr + (offset & ~PAGE_MASK);
        }

out_free:
        dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
        zpci_err("map error:\n");
        zpci_err_hex(&pa, sizeof(pa));
        return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        unsigned long iommu_page_index;
        int npages;

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                             ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
                zpci_err("unmap error:\n");
                zpci_err_hex(&dma_addr, sizeof(dma_addr));
        }

        atomic64_add(npages, &zdev->fmb->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
        dma_free_iommu(zdev, iommu_page_index, npages);
}

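/*
 * Allocate a zeroed, page-aligned coherent buffer and create a
 * bidirectional mapping for it. The returned CPU pointer relies on
 * the kernel's identity mapping of physical memory; the bus address
 * is returned via *dma_handle.
 */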
static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
                            struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        struct page *page;
        unsigned long pa;
        dma_addr_t map;

        size = PAGE_ALIGN(size);
        page = alloc_pages(flag, get_order(size));
        if (!page)
                return NULL;

        pa = page_to_phys(page);
        memset((void *) pa, 0, size);

        map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
                                 size, DMA_BIDIRECTIONAL, NULL);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
        }

        atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
        if (dma_handle)
                *dma_handle = map;
        return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
                          struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

        size = PAGE_ALIGN(size);
        atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
        s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long) pa, get_order(size));
}

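/*
 * Map each scatterlist element individually; on the first failure,
 * unwind all mappings created so far and return 0.
 */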
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        int mapped_elements = 0;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                struct page *page = sg_page(s);
                s->dma_address = s390_dma_map_pages(dev, page, s->offset,
                                                    s->length, dir, NULL);
                if (!dma_mapping_error(dev, s->dma_address)) {
                        s->dma_length = s->length;
                        mapped_elements++;
                } else
                        goto unmap;
        }
out:
        return mapped_elements;

unmap:
        for_each_sg(sg, s, mapped_elements, i) {
                if (s->dma_address)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
                                             dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
        mapped_elements = 0;
        goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
}

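/*
 * Per-device setup: allocate the root translation table and the
 * aperture bitmap (one bit per IOMMU page, sized to cover all of
 * memory), then register the table with the PCI function.
 */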
int zpci_dma_init_device(struct zpci_dev *zdev)
{
        int rc;

        spin_lock_init(&zdev->iommu_bitmap_lock);
        spin_lock_init(&zdev->dma_table_lock);

        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
                goto out_clean;
        }

        zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
        zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
        zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
                goto out_reg;
        }

        /* register the root table for DMA address space 0 */
        rc = zpci_register_ioat(zdev, 0,
                                zdev->start_dma + PAGE_OFFSET,
                                zdev->start_dma + zdev->iommu_size - 1,
                                (u64) zdev->dma_table);
        if (rc)
                goto out_reg;
        return 0;

out_reg:
        dma_free_cpu_table(zdev->dma_table);
out_clean:
        return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
        zpci_unregister_ioat(zdev, 0);
        dma_cleanup_tables(zdev);
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
        zdev->next_bit = 0;
}

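/*
 * Create the kmem caches for region/segment tables and page tables;
 * both use the size and alignment the hardware mandates for
 * translation tables.
 */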
static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                        ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
                                        0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                        ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
                                        0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}

int __init zpci_dma_init(void)
{
        return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
        kmem_cache_destroy(dma_page_table_cache);
        kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES      (1 << 16)

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
        .alloc          = s390_dma_alloc,
        .free           = s390_dma_free,
        .map_sg         = s390_dma_map_sg,
        .unmap_sg       = s390_dma_unmap_sg,
        .map_page       = s390_dma_map_pages,
        .unmap_page     = s390_dma_unmap_pages,
        /* if we support direct DMA this must be conditional */
        .is_phys        = 0,
        /* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);