xen-netfront: use correct linear area after linearizing an skb
[linux/fpc-iii.git] / arch / arm / xen / mm32.c
blob3b99860fd7ae0ad5e26fd09e3b308dd754faf7dc
1 #include <linux/cpu.h>
2 #include <linux/dma-mapping.h>
3 #include <linux/gfp.h>
4 #include <linux/highmem.h>
6 #include <xen/features.h>
/*
 * Per-CPU scratch slot: a kernel virtual address together with the pte
 * that maps it.  xen_mm32_remap_page() re-points this pte to temporarily
 * map pages for which no struct page exists (foreign grant pages).
 */
static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
11 static int alloc_xen_mm32_scratch_page(int cpu)
13 struct page *page;
14 unsigned long virt;
15 pmd_t *pmdp;
16 pte_t *ptep;
18 if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
19 return 0;
21 page = alloc_page(GFP_KERNEL);
22 if (page == NULL) {
23 pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
24 return -ENOMEM;
27 virt = (unsigned long)__va(page_to_phys(page));
28 pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
29 ptep = pte_offset_kernel(pmdp, virt);
31 per_cpu(xen_mm32_scratch_virt, cpu) = virt;
32 per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
34 return 0;
37 static int xen_mm32_cpu_notify(struct notifier_block *self,
38 unsigned long action, void *hcpu)
40 int cpu = (long)hcpu;
41 switch (action) {
42 case CPU_UP_PREPARE:
43 if (alloc_xen_mm32_scratch_page(cpu))
44 return NOTIFY_BAD;
45 break;
46 default:
47 break;
49 return NOTIFY_OK;
52 static struct notifier_block xen_mm32_cpu_notifier = {
53 .notifier_call = xen_mm32_cpu_notify,
56 static void* xen_mm32_remap_page(dma_addr_t handle)
58 unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
59 pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
61 *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
62 local_flush_tlb_kernel_page(virt);
64 return (void*)virt;
67 static void xen_mm32_unmap(void *vaddr)
69 put_cpu_var(xen_mm32_scratch_virt);
73 /* functions called by SWIOTLB */
75 static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
76 size_t size, enum dma_data_direction dir,
77 void (*op)(const void *, size_t, int))
79 unsigned long pfn;
80 size_t left = size;
82 pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
83 offset %= PAGE_SIZE;
85 do {
86 size_t len = left;
87 void *vaddr;
89 if (!pfn_valid(pfn))
91 /* Cannot map the page, we don't know its physical address.
92 * Return and hope for the best */
93 if (!xen_feature(XENFEAT_grant_map_identity))
94 return;
95 vaddr = xen_mm32_remap_page(handle) + offset;
96 op(vaddr, len, dir);
97 xen_mm32_unmap(vaddr - offset);
98 } else {
99 struct page *page = pfn_to_page(pfn);
101 if (PageHighMem(page)) {
102 if (len + offset > PAGE_SIZE)
103 len = PAGE_SIZE - offset;
105 if (cache_is_vipt_nonaliasing()) {
106 vaddr = kmap_atomic(page);
107 op(vaddr + offset, len, dir);
108 kunmap_atomic(vaddr);
109 } else {
110 vaddr = kmap_high_get(page);
111 if (vaddr) {
112 op(vaddr + offset, len, dir);
113 kunmap_high(page);
116 } else {
117 vaddr = page_address(page) + offset;
118 op(vaddr, len, dir);
122 offset = 0;
123 pfn++;
124 left -= len;
125 } while (left);
128 static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
129 size_t size, enum dma_data_direction dir)
131 /* Cannot use __dma_page_dev_to_cpu because we don't have a
132 * struct page for handle */
134 if (dir != DMA_TO_DEVICE)
135 outer_inv_range(handle, handle + size);
137 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
140 static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
141 size_t size, enum dma_data_direction dir)
144 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
146 if (dir == DMA_FROM_DEVICE) {
147 outer_inv_range(handle, handle + size);
148 } else {
149 outer_clean_range(handle, handle + size);
153 void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
154 size_t size, enum dma_data_direction dir,
155 struct dma_attrs *attrs)
158 if (!__generic_dma_ops(hwdev)->unmap_page)
159 return;
160 if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
161 return;
163 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
166 void xen_dma_sync_single_for_cpu(struct device *hwdev,
167 dma_addr_t handle, size_t size, enum dma_data_direction dir)
169 if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
170 return;
171 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
174 void xen_dma_sync_single_for_device(struct device *hwdev,
175 dma_addr_t handle, size_t size, enum dma_data_direction dir)
177 if (!__generic_dma_ops(hwdev)->sync_single_for_device)
178 return;
179 __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
182 int __init xen_mm32_init(void)
184 int cpu;
186 if (!xen_initial_domain())
187 return 0;
189 register_cpu_notifier(&xen_mm32_cpu_notifier);
190 get_online_cpus();
191 for_each_online_cpu(cpu) {
192 if (alloc_xen_mm32_scratch_page(cpu)) {
193 put_online_cpus();
194 unregister_cpu_notifier(&xen_mm32_cpu_notifier);
195 return -ENOMEM;
198 put_online_cpus();
200 return 0;
202 arch_initcall(xen_mm32_init);