/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
struct pdc_cache_info cache_info __read_mostly;
static struct pdc_btlb_info btlb_info __read_mostly;
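
/* SMP wrappers: broadcast the local flush to every online CPU via IPI.
 * (On UP kernels the header presumably maps flush_data_cache() and
 * flush_instruction_cache() straight to the _local variants.) */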
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
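
/* Run when a translation is installed for a page: if flush_dcache_page()
 * deferred a flush by setting PG_dcache_dirty (see below), do the flush
 * now that the page is getting a user mapping. */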
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}
void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
}
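
/* Read the cache/TLB geometry from PDC firmware and derive the stride
 * values used by the flush loops (the assembly routines in pacache.S). */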
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");
#if 0	/* debug dump of the PDC-reported geometry */
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif
	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}
	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
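	/* The two forms are equivalent, since
	 * (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
	 *   == cc_line << ((cc_block - 1) + 4 + cc_shift)
	 *   == cc_line << (3 + cc_block + cc_shift),
	 * which saves a shift and a multiply. */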
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
					PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}
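
/* Space-register hashing folds space-id bits into the cache/TLB index;
 * the kernel appears to rely on it being off so that aliasing stays
 * predictable. The instruction sequence that disables it is CPU
 * dependent, hence the switch below; afterwards PDC is asked to confirm
 * that no space-id bits remain in use. */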
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
}
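
/* flush_dcache_page() defers work when it can: if the page belongs to a
 * mapping that no user process has mapped yet, it only marks the page
 * PG_dcache_dirty and leaves the flush to update_mmu_cache(). Otherwise
 * it flushes the kernel alias, then walks every user mapping, purging
 * the TLB entry and flushing one address per SHMLBA congruence class. */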
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr,
					mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
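
/* Measure (in cr16 cycles) a whole-cache flush against a ranged flush
 * of the kernel text, then compute the break-even size: ranges bigger
 * than the threshold are better served by flushing the entire cache.
 * The result is cacheline-aligned and clamped to the dcache size. */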
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}
extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
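
/* Purge a range of user TLB entries. Beyond 512 pages (2MB) a full
 * TLB flush is assumed cheaper than purging page by page; otherwise
 * load the space id into sr1 (mtsp) and purge each page, hitting the
 * ITLB too when the machine keeps split I/D TLBs. */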
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}
static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}
void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}
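
/* A user range can only be flushed directly when it belongs to the
 * currently-active space (space id held in sr3); for any other mm,
 * fall back to a full cache flush. */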
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}