/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Version from mach-generic modified to support PowerTV port
 * Portions Copyright (C) 2009 Cisco Systems, Inc.
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 */

#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
#define __ASM_MACH_POWERTV_DMA_COHERENCE_H

#include <linux/sched.h>
#include <linux/version.h>
#include <linux/device.h>
#include <asm/mach-powertv/asic.h>
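
/*
 * KSEG2 is the TLB-mapped kernel segment on MIPS (vmalloc and friends),
 * so addresses there cannot be translated with virt_to_phys(); they need
 * a page-table walk instead.
 */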
static inline bool is_kseg2(void *addr)
{
        return (unsigned long)addr >= KSEG2;
}
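
/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to translate a
 * mapped kernel virtual address into a physical address. Returns 0 if no
 * valid mapping exists for the address.
 */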
static inline unsigned long virt_to_phys_from_pte(void *addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        unsigned long virt_addr = (unsigned long)addr;
        unsigned long phys_addr = 0UL;

        /* get the page global directory. */
        pgd = pgd_offset_k(virt_addr);

        if (!pgd_none(*pgd)) {
                /* get the page upper directory */
                pud = pud_offset(pgd, virt_addr);
                if (!pud_none(*pud)) {
                        /* get the page middle directory */
                        pmd = pmd_offset(pud, virt_addr);
                        if (!pmd_none(*pmd)) {
                                /* get a pointer to the page table entry */
                                ptep = pte_offset(pmd, virt_addr);
                                pte = *ptep;
                                /* check for a valid page */
                                if (pte_present(pte)) {
                                        /* get the physical address the page
                                         * is referring to */
                                        phys_addr = (unsigned long)
                                                page_to_phys(pte_page(pte));
                                        /* add the offset within the page */
                                        phys_addr |= (virt_addr & ~PAGE_MASK);
                                }
                        }
                }
        }

        return phys_addr;
}
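
/*
 * Convert a kernel virtual address into a DMA (bus) address for the
 * device. KSEG2 addresses are translated via the page-table walk above;
 * directly-mapped addresses can use virt_to_phys().
 */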
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
        size_t size)
{
        if (is_kseg2(addr))
                return phys_to_dma(virt_to_phys_from_pte(addr));
        else
                return phys_to_dma(virt_to_phys(addr));
}
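
/* Convert a struct page into a DMA (bus) address via its physical address. */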
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
        struct page *page)
{
        return phys_to_dma(page_to_phys(page));
}
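
/* Translate a DMA (bus) address back into a CPU physical address. */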
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
        dma_addr_t dma_addr)
{
        return dma_to_phys(dma_addr);
}
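
/* Nothing to undo: mapping an address does not allocate any resources here. */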
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction)
{
}
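
/* Report whether DMA with the given address mask can be supported. */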
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        return 1;
}
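
/* No extra device synchronization is performed on this platform. */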
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
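
/* DMA mapping errors are not reported on this platform; always "no error". */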
static inline int plat_dma_mapping_error(struct device *dev,
        dma_addr_t dma_addr)
{
        return 0;
}
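
/*
 * Devices on this platform are not DMA-coherent, so the generic MIPS
 * DMA code must perform cache maintenance around transfers.
 */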
static inline int plat_device_is_coherent(struct device *dev)
{
        return 0;
}

#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */