// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];
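/*
 * Write back and invalidate the caches over [start, end) so that newly
 * written instructions become visible to instruction fetch.  The range
 * is first rounded out to whole D-cache lines.
 */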
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long line_size, flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);
        local_irq_save(flags);
        cpu_cache_wbinval_range(start, end, 1);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);
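/*
 * Flush one page ahead of instruction fetch: map it with kmap_atomic()
 * and write back/invalidate it, touching the I-cache only when the VMA
 * is executable.
 */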
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long flags;
        unsigned long kaddr;

        local_irq_save(flags);
        kaddr = (unsigned long)kmap_atomic(page);
        cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
        kunmap_atomic((void *)kaddr);
        local_irq_restore(flags);
}
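/*
 * Flush just the bytes of a user page that the kernel has modified:
 * the page is mapped and the affected range handed to
 * flush_icache_range().
 */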
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
                            unsigned long addr, int len)
{
        unsigned long kaddr;

        kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
        flush_icache_range(kaddr, kaddr + len);
        kunmap_atomic((void *)kaddr);
}
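/*
 * Called after a PTE is installed: preload the TLB entry when the VMA
 * belongs to the running mm, then write back/invalidate the page's
 * kernel mapping if it was left dirty (PG_dcache_dirty) or the mapping
 * is executable.
 */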
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(*pte);
        unsigned long flags;

        if (!pfn_valid(pfn))
                return;

        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
                unsigned long kaddr;

                local_irq_save(flags);
                kaddr = (unsigned long)kmap_atomic(page);
                cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                kunmap_atomic((void *)kaddr);
                local_irq_restore(flags);
        }
}
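/*
 * Helpers for an aliasing (virtually indexed) D-cache.  aliasing()
 * tests whether two addresses fall in different cache colours within
 * SHMLBA.  kremap0()/kremap1() install temporary, locked TLB entries
 * at fixed kernel addresses that share the user address's colour, and
 * kunmap01() unlocks and invalidates such an entry.  va_present() is
 * an external helper that reports whether a user address is mapped.
 */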
#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
        return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
        kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
        __nds32__tlbop_unlk(kaddr);
        __nds32__tlbop_inv(kaddr);
        __nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
        unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
        kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
        pte = (pa | PAGE_KERNEL);
        __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
        __nds32__tlbop_rwlk(pte);
        __nds32__isb();
        return kaddr;
}
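/*
 * There is no cheap per-mm flush on an aliasing cache, so
 * flush_cache_mm() writes back and invalidates everything;
 * flush_cache_dup_mm() is a no-op.
 */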
void flush_cache_mm(struct mm_struct *mm)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
        local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}
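/*
 * Flush a user address range.  Ranges larger than eight pages fall
 * back to a full cache flush; smaller ranges are flushed page by page,
 * skipping addresses without a present mapping.
 */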
void flush_cache_range(struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
{
        unsigned long flags;

        if ((end - start) > 8 * PAGE_SIZE) {
                cpu_dcache_wbinval_all();
                if (vma->vm_flags & VM_EXEC)
                        cpu_icache_inval_all();
                return;
        }

        local_irq_save(flags);
        while (start < end) {
                if (va_present(vma->vm_mm, start))
                        cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
                start += PAGE_SIZE;
        }
        local_irq_restore(flags);
}
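/*
 * Flush a single user page by pfn: remap it at a kernel address of the
 * same cache colour, flush through that alias, then drop the mapping.
 */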
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(addr, pfn << PAGE_SHIFT);
        cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
        kunmap01(vto);
        local_irq_restore(flags);
}
void flush_cache_vmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();
}
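/*
 * Non-highmem copy/clear of a user page.  The user alias is flushed
 * before the operation and the kernel alias afterwards, so neither
 * view is left with stale lines.
 */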
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *to)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        copy_page(vto, vfrom);
        cpu_dcache_wbinval_page((unsigned long)vto);
        cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        cpu_dcache_wbinval_page((unsigned long)vaddr);
        cpu_icache_inval_page((unsigned long)vaddr);
        clear_page(addr);
        cpu_dcache_wbinval_page((unsigned long)addr);
        cpu_icache_inval_page((unsigned long)addr);
}
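/*
 * Copy a user page via temporary mappings that share the destination
 * user address's cache colour; the source's kernel alias is written
 * back first if it would alias with vaddr.
 */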
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

        kto = ((unsigned long)page_address(to) & PAGE_MASK);
        kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
        pto = page_to_phys(to);
        pfrom = page_to_phys(from);

        local_irq_save(flags);
        if (aliasing(vaddr, (unsigned long)kfrom))
                cpu_dcache_wb_page((unsigned long)kfrom);
        vto = kremap0(vaddr, pto);
        vfrom = kremap1(vaddr, pfrom);
        copy_page((void *)vto, (void *)vfrom);
        kunmap01(vfrom);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        unsigned long vto, flags, kto;

        kto = ((unsigned long)page_address(page) & PAGE_MASK);

        local_irq_save(flags);
        if (aliasing(kto, vaddr) && kto != 0) {
                cpu_dcache_inval_page(kto);
                cpu_icache_inval_page(kto);
        }
        vto = kremap0(vaddr, page_to_phys(page));
        clear_page((void *)vto);
        kunmap01(vto);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);
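/*
 * For pages of an unmapped file mapping, only mark PG_dcache_dirty and
 * defer the flush; otherwise write back/invalidate the kernel alias
 * and, when the user address has a different colour, a user-coloured
 * alias as well.
 */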
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                unsigned long kaddr, flags;

                kaddr = (unsigned long)page_address(page);
                local_irq_save(flags);
                cpu_dcache_wbinval_page(kaddr);
                if (mapping) {
                        unsigned long vaddr, kto;

                        vaddr = page->index << PAGE_SHIFT;
                        if (aliasing(vaddr, kaddr)) {
                                kto = kremap0(vaddr, page_to_phys(page));
                                cpu_dcache_wbinval_page(kto);
                                kunmap01(kto);
                        }
                }
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
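/*
 * Write into a user page on the kernel's behalf (e.g. ptrace /
 * access_process_vm()): copy through a same-coloured temporary mapping
 * and, for executable VMAs, write back/invalidate the touched lines.
 */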
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long line_size, start, end, vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC) {
                line_size = L1_cache_info[DCACHE].line_size;
                start = (unsigned long)dst & ~(line_size - 1);
                end = ((unsigned long)dst + len + line_size - 1) &
                      ~(line_size - 1);
                cpu_cache_wbinval_range(start, end, 1);
        }
        kunmap01(vto);
        local_irq_restore(flags);
}
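/*
 * Counterpart of copy_to_user_page(): read user-page contents through
 * a same-coloured temporary mapping.
 */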
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, void *src, int len)
{
        unsigned long vto, flags;

        local_irq_save(flags);
        vto = kremap0(vaddr, page_to_phys(page));
        src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
        memcpy(dst, src, len);
        kunmap01(vto);
        local_irq_restore(flags);
}
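/*
 * Flush an anonymous page before the kernel reads it through the
 * linear mapping: skipped unless the VMA belongs to the active mm;
 * the I-cache is invalidated for executable VMAs and the D-cache is
 * flushed through a same-coloured alias when the colours differ.
 */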
void flush_anon_page(struct vm_area_struct *vma,
                     struct page *page, unsigned long vaddr)
{
        unsigned long kaddr, flags, ktmp;

        if (!PageAnon(page))
                return;

        if (vma->vm_mm != current->active_mm)
                return;

        local_irq_save(flags);
        if (vma->vm_flags & VM_EXEC)
                cpu_icache_inval_page(vaddr & PAGE_MASK);
        kaddr = (unsigned long)page_address(page);
        if (aliasing(vaddr, kaddr)) {
                ktmp = kremap0(vaddr, page_to_phys(page));
                cpu_dcache_wbinval_page(ktmp);
                kunmap01(ktmp);
        }
        local_irq_restore(flags);
}
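/*
 * Kernel-mapping flush helpers: flush_kernel_dcache_page() writes back
 * and invalidates a page via its kernel address, while
 * flush_kernel_vmap_range()/invalidate_kernel_vmap_range() write back
 * or invalidate a vmap range (typically around DMA to vmalloc memory).
 */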
void flush_kernel_dcache_page(struct page *page)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wbinval_page((unsigned long)page_address(page));
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
void flush_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *addr, int size)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
#endif /* CONFIG_CPU_CACHE_ALIASING */