/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 */
9 #ifndef __ASM_MACH_IP27_DMA_COHERENCE_H
10 #define __ASM_MACH_IP27_DMA_COHERENCE_H
12 #include <asm/pci/bridge.h>
/*
 * Translate an address into a bus address for DMA by a PCI device:
 * the owning Bridge controller's baddr base is added to the address.
 * dev_to_baddr() is the struct device flavour of the same translation.
 *
 * Fix: parenthesize the 'pdev' macro argument — 'pdev->bus' would bind
 * incorrectly if the caller passed a non-trivial expression.
 */
#define pdev_to_baddr(pdev, addr) \
	(BRIDGE_CONTROLLER((pdev)->bus)->baddr + (addr))
#define dev_to_baddr(dev, addr) \
	pdev_to_baddr(to_pci_dev(dev), (addr))
21 static inline dma_addr_t
plat_map_dma_mem(struct device
*dev
, void *addr
,
24 dma_addr_t pa
= dev_to_baddr(dev
, virt_to_phys(addr
));
29 static dma_addr_t
plat_map_dma_mem_page(struct device
*dev
, struct page
*page
)
31 dma_addr_t pa
= dev_to_baddr(dev
, page_to_phys(page
));
36 static unsigned long plat_dma_addr_to_phys(struct device
*dev
,
39 return dma_addr
& ~(0xffUL
<< 56);
/*
 * Tear down a streaming DMA mapping.  Nothing to do on IP27: the
 * mapping was a pure address translation (see plat_map_dma_mem) and
 * plat_device_is_coherent() reports the hardware as DMA-coherent, so
 * no cache maintenance or bookkeeping is required here.
 */
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}
47 static inline int plat_dma_supported(struct device
*dev
, u64 mask
)
50 * we fall back to GFP_DMA when the mask isn't all 1s,
51 * so we can't guarantee allocations that must be
52 * within a tighter range than GFP_DMA..
54 if (mask
< DMA_BIT_MASK(24))
/*
 * Hook for extra synchronization before a device accesses DMA'd data.
 * No-op on IP27: plat_device_is_coherent() returns 1, so no software
 * cache maintenance is needed.
 */
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
/*
 * Report whether a dma_addr produced by the mapping helpers encodes a
 * mapping failure.  The IP27 translation is a plain base-address
 * rebase that cannot fail, so never signal an error.
 */
static inline int plat_dma_mapping_error(struct device *dev,
	dma_addr_t dma_addr)
{
	return 0;
}
/*
 * IP27 devices are always DMA-coherent with the CPU caches, so generic
 * DMA code never needs to perform software cache maintenance.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 1;	/* IP27 non-coherent mode is unsupported */
}
76 #endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */