#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

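/* Hedged usage sketch, not part of the original header: a driver that
 * has filled a kernel buffer with CPU stores and is about to hand it to
 * hardware outside the DMA API would push the dirty lines out like
 * this.  The function and parameter names below are hypothetical. */
static inline void example_flush_kernel_buffer(void *buf, unsigned long len)
{
	/* flush_kernel_dcache_range() takes (start, size), not (start, end) */
	flush_kernel_dcache_range((unsigned long)buf, len);
}
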
extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000

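/* Hedged sketch of one way the threshold could be scaled, an assumption
 * rather than original code: compare ranges against the firmware-reported
 * data cache size instead of the hard-coded constant.  This assumes the
 * global cache_info from <asm/cache.h> is visible here. */
static inline unsigned long example_flush_threshold(void)
{
	/* fall back to the tuned constant if firmware reported nothing */
	return cache_info.dc_size ? cache_info.dc_size : FLUSH_THRESHOLD;
}
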
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start, end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
#endif
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start, end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
#endif
}

extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)

#define flush_icache_range(s,e)		do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)

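/* Hedged usage sketch, not in the original header: code patching is the
 * classic flush_icache_range() client.  After the kernel stores a new
 * instruction word, the D-side line must be written back and the stale
 * I-side line invalidated before the CPU may fetch it.  The names here
 * are hypothetical. */
static inline void example_patch_instruction(unsigned int *insn, unsigned int val)
{
	*insn = val;	/* CPU store of the new instruction word */
	flush_icache_range((unsigned long)insn,
			   (unsigned long)insn + sizeof(*insn));
}
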
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr); \
	memcpy(dst, src, len); \
} while (0)

static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline pte_t *__translation_exists(struct mm_struct *mm,
					  unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, addr);

	/* The PA flush mappings show up as pte_none, but they're
	 * valid nonetheless */
	if (pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
		return NULL;
	return pte;
}
#define translation_exists(vma, addr)	__translation_exists((vma)->vm_mm, addr)

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr)))
		__flush_cache_page(vma, vmaddr);
}

#endif /* _PARISC_CACHEFLUSH_H */