/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H
#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}
/*
 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
 * also call hv_install_context().
 */
static inline void __install_page_table(pgd_t *pgdir, int asid,
					pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. FIXME. */
	int rc = hv_install_context(__pa(pgdir), prot, asid,
				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}
static inline void install_page_table(pgd_t *pgdir, int asid)
{
	pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}
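/*
 * Why pass *ptep as the prot?  Best-effort explanation (an editorial
 * gloss, not original Tilera commentary): the kernel PTE that maps the
 * pgdir page carries that page's caching ("home") attributes, so
 * handing it to hv_install_context() tells the hypervisor how the page
 * table itself should be accessed and cached.
 */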
53 * "Lazy" TLB mode is entered when we are switching to a kernel task,
54 * which borrows the mm of the previous task. The goal of this
55 * optimization is to avoid having to install a new page table. On
56 * early x86 machines (where the concept originated) you couldn't do
57 * anything short of a full page table install for invalidation, so
58 * handling a remote TLB invalidate required doing a page table
59 * re-install. Someone clearly decided that it was silly to keep
60 * doing this while in "lazy" TLB mode, so the optimization involves
61 * installing the swapper page table instead the first time one
62 * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
63 * the kernel task doesn't need to take any more interrupts. At that
64 * point it's then necessary to explicitly reinstall it when context
65 * switching back to the original mm.
67 * On Tile, we have to do a page-table install whenever DMA is enabled,
68 * so in that case lazy mode doesn't help anyway. And more generally,
69 * we have efficient per-page TLB shootdown, and don't expect to spend
70 * that much time in kernel tasks in general, so just leaving the
71 * kernel task borrowing the old page table, but handling TLB
72 * shootdowns, is a reasonable thing to do. And importantly, this
73 * lets us use the hypervisor's internal APIs for TLB shootdown, which
74 * means we don't have to worry about having TLB shootdowns blocked
75 * when Linux is disabling interrupts; see the page migration code for
76 * an example of where it's important for TLB shootdowns to complete
77 * even when interrupts are disabled at the Linux level.
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
}
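/*
 * Sketch of how the core scheduler drives these hooks; this mirrors
 * context_switch() in kernel/sched of the same era, but treat it as an
 * approximation rather than the exact upstream code:
 *
 *	struct mm_struct *mm = next->mm;
 *	struct mm_struct *oldmm = prev->active_mm;
 *	if (!mm) {
 *		// Kernel thread: borrow oldmm and stay "lazy".
 *		next->active_mm = oldmm;
 *		enter_lazy_tlb(oldmm, next);
 *	} else {
 *		// User task: install mm's page table under a fresh ASID.
 *		switch_mm(oldmm, mm, next);
 *	}
 */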
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {

		int cpu = smp_processor_id();

		/* Pick new ASID. */
		int asid = __get_cpu_var(current_asid) + 1;
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__get_cpu_var(current_asid) = asid;

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();
	}
}
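/*
 * Worked example of the ASID scheme above (illustrative numbers only;
 * the real min_asid/max_asid bounds are established at boot): with,
 * say, min_asid = 1 and max_asid = 255, a cpu can perform 255 mm
 * switches before its per-cpu counter wraps.  On wrap it must
 * local_flush_tlb(), because TLB entries tagged with the recycled
 * ASID values may still be resident on this cpu.
 */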
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}
#define destroy_context(mm)		do { } while (0)
#define deactivate_mm(tsk, mm)		do { } while (0)
#endif /* _ASM_TILE_MMU_CONTEXT_H */