spi-topcliff-pch: add recovery processing in case wait-event timeout
[zen-stable.git] / arch / mips / include / asm / mach-powertv / dma-coherence.h
blob35371641575d3b86c85ac913095b0e6d06f8775d
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Version from mach-generic modified to support PowerTV port
7 * Portions Copyright (C) 2009 Cisco Systems, Inc.
8 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
12 #ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
13 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H
15 #include <linux/sched.h>
16 #include <linux/device.h>
17 #include <asm/mach-powertv/asic.h>
19 static inline bool is_kseg2(void *addr)
21 return (unsigned long)addr >= KSEG2;
24 static inline unsigned long virt_to_phys_from_pte(void *addr)
26 pgd_t *pgd;
27 pud_t *pud;
28 pmd_t *pmd;
29 pte_t *ptep, pte;
31 unsigned long virt_addr = (unsigned long)addr;
32 unsigned long phys_addr = 0UL;
34 /* get the page global directory. */
35 pgd = pgd_offset_k(virt_addr);
37 if (!pgd_none(*pgd)) {
38 /* get the page upper directory */
39 pud = pud_offset(pgd, virt_addr);
40 if (!pud_none(*pud)) {
41 /* get the page middle directory */
42 pmd = pmd_offset(pud, virt_addr);
43 if (!pmd_none(*pmd)) {
44 /* get a pointer to the page table entry */
45 ptep = pte_offset(pmd, virt_addr);
46 pte = *ptep;
47 /* check for a valid page */
48 if (pte_present(pte)) {
49 /* get the physical address the page is
50 * referring to */
51 phys_addr = (unsigned long)
52 page_to_phys(pte_page(pte));
53 /* add the offset within the page */
54 phys_addr |= (virt_addr & ~PAGE_MASK);
60 return phys_addr;
63 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
64 size_t size)
66 if (is_kseg2(addr))
67 return phys_to_dma(virt_to_phys_from_pte(addr));
68 else
69 return phys_to_dma(virt_to_phys(addr));
72 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
73 struct page *page)
75 return phys_to_dma(page_to_phys(page));
78 static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
79 dma_addr_t dma_addr)
81 return dma_to_phys(dma_addr);
84 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
85 size_t size, enum dma_data_direction direction)
89 static inline int plat_dma_supported(struct device *dev, u64 mask)
92 * we fall back to GFP_DMA when the mask isn't all 1s,
93 * so we can't guarantee allocations that must be
94 * within a tighter range than GFP_DMA..
96 if (mask < DMA_BIT_MASK(24))
97 return 0;
99 return 1;
102 static inline void plat_extra_sync_for_device(struct device *dev)
106 static inline int plat_dma_mapping_error(struct device *dev,
107 dma_addr_t dma_addr)
109 return 0;
/*
 * plat_device_is_coherent - report whether DMA to/from @dev is
 * cache-coherent.  Always 0 here: devices on this platform are treated
 * as non-coherent, so explicit cache maintenance is required.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;
}
117 #endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */