// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
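
/*
 * State for the MMID (MemoryMapID) allocator:
 *
 *  - mmid_version holds the current generation in the bits above the
 *    hardware MMID mask.
 *  - mmid_map is a bitmap of MMIDs in use within the current generation.
 *  - reserved_mmids records, for each CPU, the MMID that was live on that
 *    CPU at the last rollover, so it can be preserved across generations.
 *  - tlb_flush_pending marks CPUs which still need to flush their local
 *    TLB before running with an MMID from the new generation.
 */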
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}
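
/*
 * get_new_mmu_context() - move @mm onto a fresh ASID on the local CPU.
 *
 * This is the legacy ASID path; CPUs with MMID support use get_new_mmid()
 * below instead.
 */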
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);
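
/*
 * flush_context() - handle an MMID generation rollover.
 *
 * Called with cpu_mmid_lock held when get_new_mmid() finds the current
 * generation exhausted: it rebuilds mmid_map from the MMIDs that are live
 * on each CPU and queues a TLB flush on every CPU so that stale
 * translations from the previous generation cannot be reused.
 */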
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_clear(mmid_map, 0, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}
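
/*
 * check_update_reserved_mmid() - during rollover, check whether @mmid is
 * one of the MMIDs reserved in flush_context(). If so, update every copy
 * of it to @newmmid and return true so the caller can keep using it.
 */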
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}
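
/*
 * get_new_mmid() - allocate a version-tagged MMID for @mm.
 *
 * Called with cpu_mmid_lock held from the check_switch_mmu_context() slow
 * path. Where possible the mm's previous MMID is reused in the new
 * generation; otherwise a free MMID is taken from mmid_map, triggering a
 * rollover via flush_context() if the generation is exhausted.
 */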
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);

	return mmid;
}
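
/*
 * check_switch_mmu_context() - set up the MMU context of @mm on the local
 * CPU. On CPUs without MMID support this falls back to the ASID scheme
 * above; otherwise it takes the MMID fast path described below, falling
 * back to cpu_mmid_lock and get_new_mmid() when the mm's MMID is stale.
 */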
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);
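
/*
 * mmid_init() - early boot setup for the MMID allocator: size the MMID
 * space from the boot CPU's version field, allocate the allocation bitmap
 * and reserve the MMID used for kernel wired/kmap entries.
 */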
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);