#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
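/*
 * Note (added commentary): the per-VMA hooks above are no-ops here. Rather
 * than flushing at each VMA boundary, this implementation accumulates the
 * dirtied virtual range in struct mmu_gather below and issues one ranged
 * flush from tlb_flush_mmu().
 */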
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned long		start;		/* lowest address recorded */
	unsigned long		end;		/* one past the highest address recorded */
	unsigned int		fullmm;		/* non-zero means full mm flush */
};
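/*
 * Typical lifetime of a gather, as driven by the core mm code:
 * tlb_gather_mmu() to initialise, then tlb_remove_tlb_entry() and
 * tlb_remove_page() per unmapped pte, then tlb_finish_mmu() to issue the
 * batched flush. See the compiled-out sketch near the end of this file.
 */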
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	/* Grow the to-be-flushed range to cover this pte's page. */
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
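/*
 * Worked example (illustrative numbers, added commentary): recording ptes at
 * addresses 0x1000 and 0x5000 leaves start == 0x1000 and end == 0x6000, so
 * the eventual flush_tlb_mm_range() call covers both ptes with a single
 * ranged flush.
 */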
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Empty range: start above every address, end below every address. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	/* Nothing was really unmapped, so the invalidate can be skipped. */
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}
/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	/* __tlb_remove_page() always returns 1 here, so the result is ignored. */
	__tlb_remove_page(tlb, page);
}
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
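/*
 * Illustrative sketch, not part of the original header: roughly how a caller
 * in the core mm code drives this API when tearing down a range of user
 * mappings. example_unmap_range() and the lookup_pte() helper are
 * hypothetical and exist only to show the call sequence, so the sketch is
 * compiled out.
 */
#ifdef UM_TLB_USAGE_SKETCH
static inline void example_unmap_range(struct mm_struct *mm,
				       unsigned long start, unsigned long end)
{
	struct mmu_gather gather;
	struct mmu_gather *tlb = &gather;
	unsigned long addr;

	tlb_gather_mmu(tlb, mm, 0);		/* 0: ranged flush, not full-mm */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = lookup_pte(mm, addr);	/* hypothetical helper */

		if (!pte_present(*ptep))
			continue;		/* nothing mapped, nothing to flush */
		tlb_remove_tlb_entry(tlb, ptep, addr);	/* widen the flush range */
		tlb_remove_page(tlb, pte_page(*ptep));	/* free the backing page */
	}
	tlb_finish_mmu(tlb, start, end);	/* one batched, ranged TLB flush */
}
#endif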
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif