/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

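/*
 * Per-CPU cache flushing primitives. These all default to cache_noop
 * and are pointed at the real implementations by the CPU family
 * specific cache_init() routines at boot.
 */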
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

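/*
 * Default no-op region flusher, left installed when the caches are
 * disabled or no CPU-specific handler registers a replacement.
 */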
static inline void noop__flush_region(void *start, int size)
{
}

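/*
 * Run a cache operation on every online CPU: broadcast it to the other
 * CPUs via smp_call_function() and then run it locally as well, with
 * preemption disabled around the local call.
 */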
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}

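/*
 * copy_to_user_page() / copy_from_user_page() keep the kernel's view of
 * a page coherent with userspace on aliasing VIPT D-caches: when the
 * page is mapped and not marked PG_dcache_dirty, the copy goes through
 * a kernel mapping with the same cache colour as the user address
 * (kmap_coherent()); otherwise a plain copy is done and the page is
 * flagged dirty so the flush can be deferred.
 */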
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

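/*
 * Alias-aware page copy, used e.g. by the COW fault path: read the
 * source through a coherent mapping when it may still be live in the
 * cache, and purge the destination's kernel lines if they alias the
 * user address the page will be mapped at.
 */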
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is up to date on other CPUs before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

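/* Zero a page, purging the kernel lines if they alias the user mapping. */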
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

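/*
 * Called on PTE updates: if writeback of this page was deferred via
 * PG_dcache_dirty, purge the kernel mapping now so the new user
 * mapping cannot observe stale cache lines.
 */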
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

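/*
 * Flush an anonymous page whose kernel and user addresses alias; in the
 * mapped-and-clean case this goes through kmap_coherent(), which purges
 * on kunmap_coherent() as the XXX note below points out.
 */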
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

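/*
 * SMP-aware front ends for the local_flush_*() primitives: each wrapper
 * simply broadcasts the corresponding local operation to all CPUs.
 */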
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

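/*
 * Derive the aliasing parameters of one cache: alias_mask selects the
 * index bits that lie above the page offset, and n_aliases is the
 * resulting number of page colours (0 if the cache indexes entirely
 * within a page). For example, 512 sets with entry_shift 5 and 4KiB
 * pages gives ((512 - 1) << 5) & ~0xfff = 0x3000, i.e.
 * (0x3000 >> 12) + 1 = 4 aliases.
 */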
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);
	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

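/*
 * Boot-time cache setup: compute aliasing parameters, install the no-op
 * region flushers, then let the matching CPU family hook replace them
 * with real implementations before the parameters are printed.
 */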
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}