// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

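/*
 * Write back and invalidate the given range so the I-cache sees freshly
 * written instructions.  The range is first rounded out to D-cache line
 * boundaries, and interrupts are disabled around the low-level flush so it
 * is not interleaved with other cache operations.
 */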
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;

	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;
	unsigned long kaddr;

	local_irq_save(flags);
	kaddr = (unsigned long)kmap_atomic(page);
	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
	kunmap_atomic((void *)kaddr);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_page);

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
	flush_icache_range(kaddr, kaddr + len);
	kunmap_atomic((void *)kaddr);
}

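/*
 * Called after a PTE for @addr has been set up.  Preload the TLB entry for
 * the current mm, then, if the page was left dirty in the D-cache
 * (PG_dcache_dirty) or the mapping is executable, write back and invalidate
 * its kernel mapping so user reads and instruction fetches see the data.
 */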
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {
		unsigned long kaddr;

		local_irq_save(flags);
		kaddr = (unsigned long)kmap_atomic(page);
		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
		kunmap_atomic((void *)kaddr);
		local_irq_restore(flags);
	}
}

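/*
 * Everything below deals with a virtually-indexed D-cache that can alias:
 * the same physical page may live in different cache sets depending on the
 * virtual address used to access it, so copies and flushes on behalf of
 * user space have to go through mappings congruent with the user virtual
 * address.
 */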
#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

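/*
 * kremap0()/kremap1() install a locked TLB entry that maps the physical page
 * into a fixed kernel window (BASE_ADDR0/BASE_ADDR1), at an offset chosen so
 * the mapping is congruent with the user virtual address; kunmap01() unlocks
 * and invalidates that entry again.
 */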
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

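/*
 * For ranges larger than eight pages it is cheaper to flush the whole
 * D-cache (plus the I-cache for executable mappings); smaller ranges are
 * walked page by page, flushing only pages that are actually present.
 */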
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}

	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	copy_page(vto, vfrom);
	cpu_dcache_wbinval_page((unsigned long)vto);
	cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page((unsigned long)addr);
	cpu_icache_inval_page((unsigned long)addr);
}

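/*
 * Copy a user page through temporary mappings congruent with the user
 * address, so the destination data lands in the cache sets the user mapping
 * will use; the source is written back first when it would alias with vaddr.
 */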
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	local_irq_save(flags);
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

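/*
 * If a page-cache page is not mapped into user space yet, only mark it
 * PG_dcache_dirty and let update_mmu_cache() do the flush when a user
 * mapping appears; otherwise write back and invalidate the kernel mapping
 * and, if the user offset aliases with it, a congruent alias as well.
 */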
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

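/*
 * Write into a user page on behalf of another context (ptrace and friends)
 * through a congruent temporary mapping; for executable mappings the touched
 * cache lines are written back and invalidated so the I-cache picks up the
 * new instructions.
 */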
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}

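/*
 * Called before the kernel accesses an anonymous page on behalf of user
 * space (e.g. from get_user_pages()): invalidate the I-cache for executable
 * mappings and, if the kernel and user addresses alias, write back and
 * invalidate the D-cache through a mapping congruent with the user address.
 */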
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long kaddr, flags, ktmp;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	kaddr = (unsigned long)page_address(page);
	if (aliasing(vaddr, kaddr)) {
		ktmp = kremap0(vaddr, page_to_phys(page));
		cpu_dcache_wbinval_page(ktmp);
		kunmap01(ktmp);
	}
	local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
#endif