/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <linux/dma-mapping.h>
14 #include <linux/pfn.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/pci.h>
19 #include <asm/machdep.h>
20 #include <asm/swiotlb.h>
22 #include <asm/abs_addr.h>
/*
 * Non-zero when the software IO TLB is in use.
 * NOTE(review): presumably consumed by platform/board code — confirm
 * against callers; the consumers are not visible in this file.
 */
int swiotlb __read_mostly;

/*
 * Request flag for enabling swiotlb.
 * NOTE(review): likely set by early platform setup before DMA ops are
 * selected — confirm.
 */
unsigned int ppc_swiotlb_enable;
27 void *swiotlb_bus_to_virt(struct device
*hwdev
, dma_addr_t addr
)
29 unsigned long pfn
= PFN_DOWN(swiotlb_bus_to_phys(hwdev
, addr
));
30 void *pageaddr
= page_address(pfn_to_page(pfn
));
33 return pageaddr
+ (addr
% PAGE_SIZE
);
37 dma_addr_t
swiotlb_phys_to_bus(struct device
*hwdev
, phys_addr_t paddr
)
39 return paddr
+ get_dma_direct_offset(hwdev
);
42 phys_addr_t
swiotlb_bus_to_phys(struct device
*hwdev
, dma_addr_t baddr
)
45 return baddr
- get_dma_direct_offset(hwdev
);
49 * Determine if an address needs bounce buffering via swiotlb.
50 * Going forward I expect the swiotlb code to generalize on using
51 * a dma_ops->addr_needs_map, and this function will move from here to the
52 * generic swiotlb code.
55 swiotlb_arch_address_needs_mapping(struct device
*hwdev
, dma_addr_t addr
,
58 struct dma_mapping_ops
*dma_ops
= get_dma_ops(hwdev
);
61 return dma_ops
->addr_needs_map(hwdev
, addr
, size
);
65 * Determine if an address is reachable by a pci device, or if we must bounce.
68 swiotlb_pci_addr_needs_map(struct device
*hwdev
, dma_addr_t addr
, size_t size
)
70 u64 mask
= dma_get_mask(hwdev
);
72 struct pci_controller
*hose
;
73 struct pci_dev
*pdev
= to_pci_dev(hwdev
);
75 hose
= pci_bus_to_host(pdev
->bus
);
76 max
= hose
->dma_window_base_cur
+ hose
->dma_window_size
;
78 /* check that we're within mapped pci window space */
79 if ((addr
+ size
> max
) | (addr
< hose
->dma_window_base_cur
))
82 return !is_buffer_dma_capable(mask
, addr
, size
);
86 swiotlb_addr_needs_map(struct device
*hwdev
, dma_addr_t addr
, size_t size
)
88 return !is_buffer_dma_capable(dma_get_mask(hwdev
), addr
, size
);
/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM.  Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
	/* Coherent allocations bypass swiotlb: direct ops suffice (see
	 * the comment above — only the map/unmap paths touch highmem). */
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	/* Map/unmap and sync paths go through the generic swiotlb code. */
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	/* Bounce decision: plain DMA-mask check (no PCI window). */
	.addr_needs_map = swiotlb_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};
/*
 * PCI flavor of the swiotlb ops.  Identical to swiotlb_dma_ops except
 * for .addr_needs_map, which additionally checks the host bridge's
 * mapped PCI window.
 */
struct dma_mapping_ops swiotlb_pci_dma_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	/* Only difference vs swiotlb_dma_ops: PCI-window-aware check. */
	.addr_needs_map = swiotlb_pci_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};
129 static int ppc_swiotlb_bus_notify(struct notifier_block
*nb
,
130 unsigned long action
, void *data
)
132 struct device
*dev
= data
;
134 /* We are only intereted in device addition */
135 if (action
!= BUS_NOTIFY_ADD_DEVICE
)
138 /* May need to bounce if the device can't address all of DRAM */
139 if (dma_get_mask(dev
) < lmb_end_of_DRAM())
140 set_dma_ops(dev
, &swiotlb_dma_ops
);
/* Notifier for devices added on the platform bus. */
static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
};
/* Notifier for devices added on the OF (device-tree) platform bus. */
static struct notifier_block ppc_swiotlb_of_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
};
155 int __init
swiotlb_setup_bus_notifier(void)
157 bus_register_notifier(&platform_bus_type
,
158 &ppc_swiotlb_plat_bus_notifier
);
159 bus_register_notifier(&of_platform_bus_type
,
160 &ppc_swiotlb_of_bus_notifier
);