// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

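/*
 * Per-CPU cache flushing primitives. These all default to the no-op
 * handler; cpu_cache_init() lets the CPU family specific setup code
 * install real implementations further down.
 */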
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

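/*
 * Region operations: wback writes dirty lines back to memory, purge
 * writes back and invalidates, and invalidate discards lines without
 * writeback. These likewise stay no-ops until the CPU specific code
 * hooks them.
 */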
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

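/*
 * Run a cache operation across all online CPUs. The op is always run
 * locally; only SHX3 parts need an IPI to reach the other cores.
 */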
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	func(info);

	preempt_enable();
}

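/*
 * Write into a page that may also be user-mapped. With an aliasing
 * D-cache, if the page is mapped and known clean, stage the write
 * through a kernel mapping of the same cache colour so the user view
 * stays coherent; otherwise write directly and mark the dcache state
 * dirty so a later flush knows about it.
 */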
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

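/*
 * Copy a user page (e.g. on a COW fault). The source is read through
 * a coherent mapping when it may hold dirty user-coloured lines; the
 * copy is purged afterwards if it aliases the user address or backs
 * an executable mapping.
 */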
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);

	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

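/*
 * Called when a PTE is being set up: if the page's dcache state was
 * dirty, its leftover kernel-side lines are purged so user mappings
 * of a different colour start out coherent.
 */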
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

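/*
 * Reconcile an anonymous page whose kernel mapping aliases the user
 * address it is mapped at, so that both views see the same data.
 */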
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

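/*
 * The global flush entry points below simply fan the corresponding
 * local op out through cacheop_on_each_cpu().
 */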
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

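/*
 * Ops that take more than one argument marshal them through a
 * flusher_data triplet (vma, addr1, addr2), since the cross-CPU
 * callback only gets a single pointer.
 */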
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

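/*
 * alias_mask selects the set-index bits that lie above the page
 * offset, and n_aliases is the number of distinct page colours.
 * As a worked example (hypothetical geometry): a 16KiB direct-mapped
 * D-cache with 32-byte lines and 4KiB pages has sets=512 and
 * entry_shift=5, so alias_mask = ((512-1) << 5) & ~0xfff = 0x3000
 * and n_aliases = (0x3000 >> 12) + 1 = 4.
 */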
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
	c->alias_mask = 0;
#endif
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

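/*
 * Boot-time cache setup: compute alias parameters, default every
 * flush op to a no-op, then let the matching CPU family install its
 * real handlers.
 */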
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		extern void __weak j2_cache_init(void);

		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}