/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
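
/* Drain the pending batch.  On SMP the flush is cross-called so every
 * cpu that may cache translations for this mm drops them; on UP a local
 * flush tagged with the hardware context bits suffices.  If the mm no
 * longer holds a valid hardware context, its old TLB entries can never
 * be matched again, so only the counter needs resetting.
 */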
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}
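
/* Queue one virtual address for a deferred TLB flush.  "orig" is the
 * pte value just replaced, used to decide whether a D-cache flush is
 * also required.  vaddr is page aligned, so bit 0 is free; it is
 * borrowed to record that the old mapping was executable, telling the
 * low-level flush code to demap the I-TLB as well.  (Call sites are
 * assumed to be the pte update paths in the pgtable headers, e.g.
 * set_pte_at(); they are not part of this file.)
 */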
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;
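
		/* Bit 13 is the D-cache alias bit: with 8K pages and a
		 * virtually indexed cache larger than a page (e.g. the
		 * 16K direct-mapped spitfire D-cache), two mappings of
		 * the same page that differ in bit 13 index different
		 * cache lines.  A dirty, aliased page must be flushed
		 * from every cpu's D-cache before its translation
		 * disappears.
		 */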
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
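
	/* tlb_frozen is set while an mmu_gather tear-down is in flight
	 * (see tlb_gather_mmu() in asm/tlb.h); the gather itself will
	 * do the flushing then, so per-pte batching here is skipped.
	 */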
	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
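
/* sparc64 maps the page tables themselves through a linear "virtual
 * pte table" (VPTE) region which the TLB miss handlers walk.  When
 * page table pages covering [start, end) are freed, the VPTE
 * translations for that range must be flushed too; they go through
 * the same pending batch.
 */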
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem.  */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;
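
	/* The VPTE region lives at a different virtual base depending
	 * on cpu family; pick the right one for this chip.
	 */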
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;
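
	/* Each pte is 8 bytes, so the vpte address of the pte mapping
	 * user address V is vpte_base + (V / PAGE_SIZE) * 8, i.e.
	 * V >> (PAGE_SHIFT - 3); with 8K pages that is V >> 10.  Since
	 * s and e are signed longs, addresses above the VA hole yield
	 * negative offsets, landing below vpte_base.
	 */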
	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}
	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}
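
/* Illustrative flow, assumed from the code above rather than quoted
 * from elsewhere in the tree: a pte update (e.g. mprotect()) reaches
 * tlb_batch_add() via the pgtable accessors; addresses accumulate in
 * the per-cpu mmu_gather until TLB_BATCH_NR entries are queued, the
 * mm changes, or flush_tlb_pending() is forced (e.g. at context
 * switch), amortizing flush cost across many pte updates.  Full-mm
 * tear-down skips the batch via tlb_frozen and flushes the whole
 * context at once.
 */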