#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>

static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
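
/*
 * Each CPU gets one scratch lowmem page: xen_mm32_scratch_virt is its
 * kernel virtual address and xen_mm32_scratch_ptep the PTE that maps
 * it, so the mapping can later be redirected at another frame.
 */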
static int alloc_xen_mm32_scratch_page(int cpu)
{
	struct page *page;
	unsigned long virt;
	pmd_t *pmdp;
	pte_t *ptep;

	if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
		return 0;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
		return -ENOMEM;
	}

	virt = (unsigned long)__va(page_to_phys(page));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
	ptep = pte_offset_kernel(pmdp, virt);

	per_cpu(xen_mm32_scratch_virt, cpu) = virt;
	per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;

	return 0;
}
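
/*
 * CPU hotplug notifier: make sure a scratch page exists before a new
 * CPU is brought up.  CPUs already online are handled in
 * xen_mm32_init() below.
 */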
static int xen_mm32_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (alloc_xen_mm32_scratch_page(cpu))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_mm32_cpu_notifier = {
	.notifier_call	= xen_mm32_cpu_notify,
};
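
/*
 * Temporarily map the frame at bus address 'handle' into this CPU's
 * scratch slot by rewriting the scratch PTE, then flush the local TLB
 * entry.  get_cpu_var()/put_cpu_var() in the remap/unmap pair keep
 * preemption disabled while the mapping is in use.  Callers only get
 * here when XENFEAT_grant_map_identity is advertised (checked in
 * dma_cache_maint() below).
 */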
static void *xen_mm32_remap_page(dma_addr_t handle)
{
	unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
	pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);

	*ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
	local_flush_tlb_kernel_page(virt);

	return (void *)virt;
}

static void xen_mm32_unmap(void *vaddr)
{
	put_cpu_var(xen_mm32_scratch_virt);
}

/* functions called by SWIOTLB */
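
/*
 * Apply the cache maintenance operation 'op' (dmac_map_area or
 * dmac_unmap_area) to the buffer, one page at a time, mapping highmem
 * pages and pages without a local struct page (e.g. foreign grant
 * mappings) as needed.
 */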
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
			    size_t size, enum dma_data_direction dir,
			    void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;
		void *vaddr;

		if (!pfn_valid(pfn)) {
			/* Cannot map the page, we don't know its physical address.
			 * Return and hope for the best */
			if (!xen_feature(XENFEAT_grant_map_identity))
				return;
			vaddr = xen_mm32_remap_page(handle) + offset;
			op(vaddr, len, dir);
			xen_mm32_unmap(vaddr - offset);
		} else {
			struct page *page = pfn_to_page(pfn);

			if (PageHighMem(page)) {
				if (len + offset > PAGE_SIZE)
					len = PAGE_SIZE - offset;

				if (cache_is_vipt_nonaliasing()) {
					vaddr = kmap_atomic(page);
					op(vaddr + offset, len, dir);
					kunmap_atomic(vaddr);
				} else {
					vaddr = kmap_high_get(page);
					if (vaddr) {
						op(vaddr + offset, len, dir);
						kunmap_high(page);
					}
				}
			} else {
				vaddr = page_address(page) + offset;
				op(vaddr, len, dir);
			}
		}

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
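
/*
 * Cache maintenance for the dev-to-cpu direction (unmap/sync_for_cpu):
 * invalidate the outer cache unless the transfer was DMA_TO_DEVICE,
 * then run dmac_unmap_area over the buffer.
 */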
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* Cannot use __dma_page_dev_to_cpu because we don't have a
	 * struct page for handle */

	if (dir != DMA_TO_DEVICE)
		outer_inv_range(handle, handle + size);

	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_unmap_area);
}
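
/*
 * Cache maintenance for the cpu-to-dev direction: clean the inner cache
 * with dmac_map_area first, then clean (or, for DMA_FROM_DEVICE,
 * invalidate) the outer cache.
 */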
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_map_area);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(handle, handle + size);
	else
		outer_clean_range(handle, handle + size);
}
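
/*
 * Hooks called by the Xen SWIOTLB code.  Cache maintenance is only
 * performed when the generic arch dma_ops implement the corresponding
 * callback, i.e. when the native path would have done it too.
 */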
void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!__generic_dma_ops(hwdev)->unmap_page)
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
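
/*
 * Scratch pages are only set up in the initial domain (the domain that
 * ends up mapping foreign pages).  Allocate one for every CPU that is
 * already online and register the hotplug notifier for CPUs brought up
 * later; unwind and fail with -ENOMEM if any allocation fails.
 */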
int __init xen_mm32_init(void)
{
	int cpu;

	if (!xen_initial_domain())
		return 0;

	register_cpu_notifier(&xen_mm32_cpu_notifier);
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (alloc_xen_mm32_scratch_page(cpu)) {
			put_online_cpus();
			unregister_cpu_notifier(&xen_mm32_cpu_notifier);
			return -ENOMEM;
		}
	}
	put_online_cpus();

	return 0;
}
arch_initcall(xen_mm32_init);