/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 */
9 #ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
10 #define __ASM_MACH_IP27_DMA_COHERENCE_H
12 #include <asm/pci/bridge.h>
/*
 * Convert a physical address into a bus address as seen through the
 * PCI bridge @pdev sits behind: add the per-bus bridge's baddr offset
 * (looked up via BRIDGE_CONTROLLER()) to @addr.
 */
#define pdev_to_baddr(pdev, addr) \
(BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr))
/* Same as pdev_to_baddr(), but taking a generic struct device. */
#define dev_to_baddr(dev, addr) \
pdev_to_baddr(to_pci_dev(dev), (addr))
21 static inline dma_addr_t
plat_map_dma_mem(struct device
*dev
, void *addr
,
24 dma_addr_t pa
= dev_to_baddr(dev
, virt_to_phys(addr
));
29 static inline dma_addr_t
plat_map_dma_mem_page(struct device
*dev
,
32 dma_addr_t pa
= dev_to_baddr(dev
, page_to_phys(page
));
37 static inline unsigned long plat_dma_addr_to_phys(struct device
*dev
,
40 return dma_addr
& ~(0xffUL
<< 56);
43 static inline void plat_unmap_dma_mem(struct device
*dev
, dma_addr_t dma_addr
,
44 size_t size
, enum dma_data_direction direction
)
48 static inline int plat_dma_supported(struct device
*dev
, u64 mask
)
51 * we fall back to GFP_DMA when the mask isn't all 1s,
52 * so we can't guarantee allocations that must be
53 * within a tighter range than GFP_DMA..
55 if (mask
< DMA_BIT_MASK(24))
/* Extra device-sync hook: no-op on IP27 (DMA is cache-coherent here). */
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
66 static inline int plat_dma_mapping_error(struct device
*dev
,
/* All DMA on IP27 is cache-coherent; the platform has no other mode. */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 1;	/* IP27 non-coherent mode is unsupported */
}
77 #endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */