debugfs: Modified default dir of debugfs for debugging UHCI.
[linux/fpc-iii.git] / arch / powerpc / kernel / dma-swiotlb.c
blobe8a57de85bcfd0bd72160773d3977356685f94b0
1 /*
2 * Contains routines needed to support swiotlb for ppc.
4 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
13 #include <linux/dma-mapping.h>
14 #include <linux/pfn.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/pci.h>
19 #include <asm/machdep.h>
20 #include <asm/swiotlb.h>
21 #include <asm/dma.h>
22 #include <asm/abs_addr.h>
/* Non-zero when swiotlb bounce buffering is in use on this system. */
int swiotlb __read_mostly;
/* NOTE(review): presumably set during platform setup (e.g. from a boot
 * option) to request swiotlb support — confirm against the callers. */
unsigned int ppc_swiotlb_enable;
28 * Determine if an address is reachable by a pci device, or if we must bounce.
30 static int
31 swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
33 dma_addr_t max;
34 struct pci_controller *hose;
35 struct pci_dev *pdev = to_pci_dev(hwdev);
37 hose = pci_bus_to_host(pdev->bus);
38 max = hose->dma_window_base_cur + hose->dma_window_size;
40 /* check that we're within mapped pci window space */
41 if ((addr + size > max) | (addr < hose->dma_window_base_cur))
42 return 1;
44 return 0;
48 * At the moment, all platforms that use this code only require
49 * swiotlb to be used if we're operating on HIGHMEM. Since
50 * we don't ever call anything other than map_sg, unmap_sg,
51 * map_page, and unmap_page on highmem, use normal dma_ops
52 * for everything else.
54 struct dma_mapping_ops swiotlb_dma_ops = {
55 .alloc_coherent = dma_direct_alloc_coherent,
56 .free_coherent = dma_direct_free_coherent,
57 .map_sg = swiotlb_map_sg_attrs,
58 .unmap_sg = swiotlb_unmap_sg_attrs,
59 .dma_supported = swiotlb_dma_supported,
60 .map_page = swiotlb_map_page,
61 .unmap_page = swiotlb_unmap_page,
62 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
63 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
64 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
65 .sync_sg_for_device = swiotlb_sync_sg_for_device
68 struct dma_mapping_ops swiotlb_pci_dma_ops = {
69 .alloc_coherent = dma_direct_alloc_coherent,
70 .free_coherent = dma_direct_free_coherent,
71 .map_sg = swiotlb_map_sg_attrs,
72 .unmap_sg = swiotlb_unmap_sg_attrs,
73 .dma_supported = swiotlb_dma_supported,
74 .map_page = swiotlb_map_page,
75 .unmap_page = swiotlb_unmap_page,
76 .addr_needs_map = swiotlb_pci_addr_needs_map,
77 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
78 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
79 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
80 .sync_sg_for_device = swiotlb_sync_sg_for_device
83 static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
84 unsigned long action, void *data)
86 struct device *dev = data;
88 /* We are only intereted in device addition */
89 if (action != BUS_NOTIFY_ADD_DEVICE)
90 return 0;
92 /* May need to bounce if the device can't address all of DRAM */
93 if (dma_get_mask(dev) < lmb_end_of_DRAM())
94 set_dma_ops(dev, &swiotlb_dma_ops);
96 return NOTIFY_DONE;
99 static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
100 .notifier_call = ppc_swiotlb_bus_notify,
101 .priority = 0,
104 static struct notifier_block ppc_swiotlb_of_bus_notifier = {
105 .notifier_call = ppc_swiotlb_bus_notify,
106 .priority = 0,
109 int __init swiotlb_setup_bus_notifier(void)
111 bus_register_notifier(&platform_bus_type,
112 &ppc_swiotlb_plat_bus_notifier);
113 bus_register_notifier(&of_platform_bus_type,
114 &ppc_swiotlb_of_bus_notifier);
116 return 0;