/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
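/* Flush all virtual addresses batched so far on this CPU.  The demaps
 * are only issued while the mm still owns a valid hardware context;
 * on SMP they are also broadcast to the other CPUs.
 */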
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
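/* Queue one (mm, vaddr) pair in the per-cpu batch.  Bit 0 of the stored
 * address records whether the mapping was executable.  The batch is
 * flushed eagerly when it fills up or when a different mm shows up.
 */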
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}
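/* A user PTE is being replaced.  If the old translation was dirty and
 * the kernel mapping of the page lands on a different D-cache color
 * (bit 13 of the address), flush the page out of the D-cache first.
 * Unless this is a full address-space teardown, the old translation is
 * then queued for a batched TLB demap.
 */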
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
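/* A non-huge PMD is being replaced: walk the page table it points to
 * and queue a TLB demap for every valid PTE covering the HPAGE_SIZE
 * region starting at vaddr.
 */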
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
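/* Install a new PMD value.  Transitions into or out of a huge mapping
 * update the per-mm huge_pte_count (the first huge mapping triggers the
 * huge-page TSB setup), and any previously valid mapping under this PMD
 * gets its TLB entries queued for flushing.
 */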
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;
		if (mm->context.huge_pte_count == 1)
			hugetlb_setup(mm);
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}
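/* Deposit a pre-allocated page table for a huge PMD, to be handed back
 * by pgtable_trans_huge_withdraw() when the huge mapping is split.  The
 * deposited tables are chained through a list_head embedded in the
 * page-table page itself.
 */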
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}
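/* Take back the most recently deposited page table and clear the two
 * PTE slots that served as list linkage before handing it out.
 */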
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */