arch/mn10300/mm/dma-alloc.c
/* MN10300 Dynamic DMA mapping support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from: arch/i386/kernel/pci-dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <asm/io.h>

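/* Bump allocator over what appears to be the on-chip PCI SRAM window
 * (0xbc000000-0xbe000000).  Coherent buffers are carved from this region
 * first, in 256-byte granules, and are never handed back to it (see
 * mn10300_dma_free() below).
 */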
static unsigned long pci_sram_allocated = 0xbc000000;

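/* Allocate a coherent DMA buffer.  The SRAM window is tried first; if the
 * request does not fit, ordinary pages are allocated instead and the
 * returned pointer is remapped through the uncached memory window (the
 * 0x20000000 address bit) so that CPU accesses bypass the data cache.
 */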
static void *mn10300_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long addr;
	void *ret;

	pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
		 dev ? dev_name(dev) : "?", size, gfp);

	if (0xbe000000 - pci_sram_allocated >= size) {
		size = (size + 255) & ~255;
		addr = pci_sram_allocated;
		pci_sram_allocated += size;
		ret = (void *) addr;
		goto done;
	}

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	addr = __get_free_pages(gfp, get_order(size));
	if (!addr)
		return NULL;

	/* map the coherent memory through the uncached memory window */
	ret = (void *) (addr | 0x20000000);

	/* fill the memory with obvious rubbish */
	memset((void *) addr, 0xfb, size);

	/* write back and evict all cache lines covering this region */
	mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);

done:
	*dma_handle = virt_to_bus((void *) addr);
	printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
	return ret;
}

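/* Free a coherent buffer.  Clearing the uncached-window bit recovers the
 * cached virtual address; anything still at or above 0x9c000000 then came
 * from the SRAM allocator (0xbc000000 with bit 0x20000000 cleared) and is
 * simply leaked, since that allocator keeps no free list.
 */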
static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr & ~0x20000000;

	if (addr >= 0x9c000000)
		return;

	free_pages(addr, get_order(size));
}

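/* Map a scatterlist for streaming DMA.  There is no IOMMU, so each entry's
 * DMA address is just its physical address; rather than walking the
 * individual regions, the whole data cache is flushed and invalidated once
 * at the end.
 */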
static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	mn10300_dcache_flush_inv();
	return nents;
}

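/* Map a single page for streaming DMA.  Again the bus address is just the
 * physical address plus the offset; no cache maintenance is done at map
 * time, apparently leaving that to the sync_*_for_device hooks below.
 */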
static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	return page_to_bus(page) + offset;
}

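/* Make CPU-side writes visible to the device before it touches the buffer.
 * Both hooks take the blunt approach of flushing and invalidating the
 * entire data cache; no sync_*_for_cpu counterparts are provided.
 */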
static void mn10300_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static void mn10300_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static int mn10300_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

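/* The operations table handed to the generic DMA mapping layer; the arch's
 * get_dma_ops() presumably returns this table for every device (see
 * asm/dma-mapping.h).
 */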
struct dma_map_ops mn10300_dma_ops = {
	.alloc			= mn10300_dma_alloc,
	.free			= mn10300_dma_free,
	.map_page		= mn10300_dma_map_page,
	.map_sg			= mn10300_dma_map_sg,
	.sync_single_for_device	= mn10300_dma_sync_single_for_device,
	.sync_sg_for_device	= mn10300_dma_sync_sg_for_device,
	.dma_supported		= mn10300_dma_supported,
};