mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] arch/cris/arch-v32/mm/tlb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Low level TLB handling.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mm_types.h>

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>
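/*
 * The two helper macros below operate on whichever MMU support-register bank
 * was last selected with SUPP_BANK_SEL(): the first picks a TLB index, the
 * second programs the hi/lo words of the selected entry.
 */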
#define UPDATE_TLB_SEL_IDX(val)					\
do {								\
	unsigned long tlb_sel;					\
								\
	tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val);	\
	SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel);			\
} while(0)
#define UPDATE_TLB_HILO(tlb_hi, tlb_lo)		\
do {						\
	SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi);	\
	SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo);	\
} while(0)
/*
 * The TLB can host up to 256 different mm contexts at the same time. The
 * running context is found in the PID register. Each TLB entry contains a
 * page_id that has to match the PID register to give a hit. page_id_map keeps
 * track of which mm is assigned to which page_id, making sure it's known when
 * to invalidate TLB entries.
 *
 * The last page_id is never running; it is used as an invalid page_id so that
 * it's possible to make TLB entries that will never match.
 *
 * Note: the flushes need to be atomic, otherwise an interrupt handler that
 * uses vmalloc'ed memory might cause a TLB load in the middle of a flush.
 */
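/*
 * As the flush routines below show, the page_id occupies the low eight bits
 * of the TLB hi word: they compare (tlb_hi & 0xff) against
 * mm->context.page_id.
 */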
/* Flush all TLB entries. */
void
__flush_tlb_all(void)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long mmu_tlb_hi;
	unsigned long mmu_tlb_sel;
	/*
	 * Mask with 0xf so similar TLB entries aren't written in the same
	 * 4-way entry group.
	 */
	local_irq_save(flags);
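	/*
	 * The instruction MMU (bank 1) and the data MMU (bank 2) each have
	 * their own TLB, so walk both support-register banks.
	 */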
	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu); /* Select the MMU */
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			/* Store invalid entry */
			mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);

			mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
				    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));
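			/*
			 * Program the selected entry: the hi word gets the
			 * invalid page_id, the lo word is cleared.
			 */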
			SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
			SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
			SUPP_REG_WR(RW_MM_TLB_LO, 0);
		}
	}

	local_irq_restore(flags);
}
/* Flush an entire user address space. */
void
__flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int mmu;
	unsigned long flags;
	unsigned long page_id;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;
	page_id = mm->context.page_id;
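	/*
	 * An mm that never received a page_id (see init_new_context() below)
	 * cannot have any TLB entries, so there is nothing to flush.
	 */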
	if (page_id == NO_CONTEXT)
		return;
	/* Mark the TLB entries that match the page_id as invalid. */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);

			/* Get the page_id */
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

			/* Check if the page_id matches. */
			if ((tlb_hi & 0xff) == page_id) {
				mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
							INVALID_PAGEID)
					    | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
							i & 0xf));

				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}
/* Invalidate a single page. */
void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int i;
	int mmu;
	unsigned long page_id;
	unsigned long flags;
	unsigned long tlb_hi;
	unsigned long mmu_tlb_hi;
	page_id = vma->vm_mm->context.page_id;

	if (page_id == NO_CONTEXT)
		return;
	addr &= PAGE_MASK;
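	/*
	 * addr is page aligned from here on, so it can be compared directly
	 * against the page-number bits (tlb_hi & PAGE_MASK) of each entry.
	 */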
	/*
	 * Invalidate those TLB entries that match both the mm context and the
	 * requested virtual address.
	 */
	local_irq_save(flags);

	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {
			UPDATE_TLB_SEL_IDX(i);
			SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);

			/* Check if page_id and address match. */
			if (((tlb_hi & 0xff) == page_id) &&
			    ((tlb_hi & PAGE_MASK) == addr)) {
				mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
						       INVALID_PAGEID) | addr;

				UPDATE_TLB_HILO(mmu_tlb_hi, 0);
			}
		}
	}

	local_irq_restore(flags);
}
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
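/*
 * A real page_id is assigned later, by get_mmu_context() called from
 * switch_mm() below.
 */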
static DEFINE_SPINLOCK(mmu_context_lock);
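/*
 * mmu_context_lock serializes get_mmu_context() in switch_mm(), i.e. the
 * assignment of page_ids to mms described at the top of this file.
 */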
/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next) {
		int cpu = smp_processor_id();

		/* Make sure there is an MMU context. */
		spin_lock(&mmu_context_lock);
		get_mmu_context(next);
		cpumask_set_cpu(cpu, mm_cpumask(next));
		spin_unlock(&mmu_context_lock);
		/*
		 * Remember the pgd for the fault handlers. Keep a separate
		 * copy of it because current and active_mm might be invalid
		 * at points where there's still a need to dereference the
		 * pgd.
		 */
		per_cpu(current_pgd, cpu) = next->pgd;
		/* Switch context in the MMU. */
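		/*
		 * The task's TLS value is kept in the same special register
		 * as the page_id, so it is written back together with the
		 * new context whenever a task is available.
		 */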
		if (tsk && task_thread_info(tsk)) {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
				    task_thread_info(tsk)->tls);
		} else {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
		}
	}
}