/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
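
/*
 * Per-CPU cache flush primitives. These all default to cache_noop and
 * are replaced with real implementations by the CPU family specific
 * cache_init code invoked from cpu_cache_init().
 */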
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
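
/*
 * Region write-back/purge/invalidate operations, likewise selected at
 * boot by the CPU specific cache code and exported for module use.
 */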
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}
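
/*
 * Run a cache operation on all online CPUs: the op always runs on the
 * local CPU, and an IPI broadcast is only attempted where cross-core
 * flushing is actually needed.
 */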
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
					int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	func(info);

	preempt_enable();
}
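
/*
 * copy_to_user_page()/copy_from_user_page() copy through a
 * kmap_coherent() mapping when the D-cache has aliases and the page is
 * mapped and clean, so the copy hits the same cache colour as the user
 * mapping; otherwise they copy directly and clear PG_dcache_clean.
 */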
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}
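
/*
 * copy_user_highpage() handles COW style user page copies: the source
 * is read through a coherent (or atomic) mapping, and the destination
 * is purged if it could alias the user address or back executable code.
 */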
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is up to date on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);
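
/*
 * __update_cache(): on an aliasing D-cache, purge the kernel-side
 * lines of a page being mapped into user space unless the page has
 * already been marked PG_dcache_clean.
 */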
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}
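
/*
 * __flush_anon_page(): resolve a potential alias between the kernel
 * mapping of an anonymous page and the user address it is mapped at.
 */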
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
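
/*
 * Generic cache maintenance entry points: each one fans the request
 * out to every online CPU via cacheop_on_each_cpu().
 */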
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
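
/*
 * Derive the alias mask and alias count from the probed cache
 * geometry; aliases exist whenever a single cache way spans more than
 * one page.
 */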
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
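
/*
 * cpu_cache_init(): compute alias information for the probed caches,
 * install the default no-op flushers, then hand off to the CPU
 * type/family specific cache initialisation.
 */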
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		extern void __weak j2_cache_init(void);

		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}