#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm;		/* non-zero means full mm flush */
};
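
/*
 * Illustrative flow, as a sketch only: this is roughly how the generic mm
 * code (mm/memory.c and friends) is expected to drive this API while
 * unmapping a range; the loop below is illustration, not a real call site:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each present pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, address);
 *		tlb_remove_page(&tlb, page);
 *	tlb_finish_mmu(&tlb, start, end);
 */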

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
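
/*
 * Example with hypothetical addresses: starting from the empty range left
 * by init_tlb_gather() (start = TASK_SIZE, end = 0), recording 0x2000 and
 * then 0x5000 widens the range to [0x2000, 0x5000 + PAGE_SIZE), so one
 * ranged flush later covers every pte that was actually touched.
 */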

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
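
/*
 * Design note: the reset above intentionally leaves an inverted, "empty"
 * range (start > end), so that the min/max updates in
 * __tlb_remove_tlb_entry() converge on exactly the span of addresses that
 * were really unmapped; a full-mm flush pins the range to the whole
 * address space up front instead.
 */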

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}
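
/*
 * Worked example for the fullmm test above: !(start | (end+1)) is non-zero
 * only when start == 0 and end == ~0UL (so end+1 wraps to 0), i.e. when
 * the caller asked to tear down the entire address space; exit_mmap()
 * passes exactly this (0, -1) range. Any other range is a partial flush.
 */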

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}
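
/*
 * Note on the return value, based on the generic mmu_gather convention of
 * this era: it reports how many free page slots remain in the batch, and
 * a zero return obliges the caller to drain with tlb_flush_mmu(). Always
 * returning 1 here means no batching ever happens: each page is freed
 * immediately by free_page_and_swap_cache() above.
 */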

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
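
/*
 * Example of the optimisation described above: an munmap() over a region
 * whose ptes are already clear never reaches this macro, so need_flush
 * stays 0 and tlb_flush_mmu() returns early without issuing any flush.
 */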

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)