/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
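
/* Each CPU accumulates pending virtual addresses in its per-cpu
 * mmu_gather and flushes them as one batch: a single cross-call on
 * SMP, or a single __flush_tlb_pending() pass on UP.  Contexts that
 * fail CTX_VALID() are skipped, presumably because such an mm will be
 * assigned a fresh hardware context before it runs again, so its old
 * TLB entries can never match.
 */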
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}
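
/* Queue one user translation for a deferred TLB flush.  If the old
 * PTE was dirty, the page may also need an immediate D-cache flush,
 * handled below, before the mapping is allowed to go away.
 */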
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;
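
	/* Bit 0 of the queued address, set just above for executable
	 * mappings, tells the low-level flush code to demap the I-TLB
	 * as well as the D-TLB for this entry.
	 */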
	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}
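
	/* The bit-13 test above catches D-cache aliasing: with 8KB
	 * pages and a 16KB direct-mapped, virtually-indexed D-cache
	 * (Spitfire-class chips), bit 13 is the only index bit above
	 * the page offset.  A kernel mapping and a user mapping of the
	 * same page that differ in bit 13 index different cache lines,
	 * so the page must be flushed before the dirty translation is
	 * dropped.
	 */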

no_cache_flush:

	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
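
/* sparc64 services TLB misses through a virtually mapped linear page
 * table (the VPTE area), so the page tables themselves have TLB
 * entries that must go away when page tables are unmapped or freed.
 * This routine converts a user address range into the matching VPTE
 * range and queues those addresses for flushing.
 */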
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem.  */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));
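
	/* Each PTE is 8 bytes, so the VPTE entry for a virtual address
	 * lives at vpte_base + (va >> PAGE_SHIFT) * 8, which is the
	 * va >> (PAGE_SHIFT - 3) computed above; with 8KB pages that
	 * is va >> 10.
	 */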

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr != 0)
		flush_tlb_pending();
}
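
/* The final flush_tlb_pending() drains any partially filled batch
 * right away, presumably because the page-table pages backing these
 * VPTE translations are about to be freed and may be reused.
 */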