// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>

#ifdef CONFIG_MMU
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

static unsigned long num_asids;

static atomic_long_t current_version;

static DEFINE_RAW_SPINLOCK(context_lock);
static cpumask_t context_tlb_flush_pending;
static unsigned long *context_asid_map;

static DEFINE_PER_CPU(atomic_long_t, active_context);
static DEFINE_PER_CPU(unsigned long, reserved_context);
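
/*
 * Each context ID stored in mm->context.id packs an allocation generation
 * ("version") in the bits above SATP_ASID_BITS and the hardware ASID in the
 * low bits; the cntx2asid()/cntx2version() helpers used below split a
 * context ID into those two halves.
 */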

static bool check_update_reserved_context(unsigned long cntx,
					  unsigned long newcntx)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved CONTEXT looking for a match.
	 * If we find one, then we can update our mm to use new CONTEXT
	 * (i.e. the same CONTEXT in the current_version) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old CONTEXT are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved CONTEXT in a future
	 * version.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_context, cpu) == cntx) {
			hit = true;
			per_cpu(reserved_context, cpu) = newcntx;
		}
	}

	return hit;
}

static void __flush_context(void)
{
	int i;
	unsigned long cntx;

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_zero(context_asid_map, num_asids);

	/* Mark already active ASIDs as used */
	for_each_possible_cpu(i) {
		cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, we must preserve
		 * its reserved CONTEXT, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (cntx == 0)
			cntx = per_cpu(reserved_context, i);

		__set_bit(cntx2asid(cntx), context_asid_map);
		per_cpu(reserved_context, i) = cntx;
	}

	/* Mark ASID #0 as used because it is used at boot-time */
	__set_bit(0, context_asid_map);

	/* Queue a TLB invalidation for each CPU on next context-switch */
	cpumask_setall(&context_tlb_flush_pending);
}
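
/*
 * Pick a context for @mm, roughly in this order: keep the mm's old ASID if
 * it survived a rollover as a reserved context, else re-claim that ASID from
 * the bitmap if it is still free, else allocate a fresh ASID, bumping the
 * version and flushing everything once the bitmap is exhausted.
 */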
static unsigned long __new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	unsigned long cntx = atomic_long_read(&mm->context.id);
	unsigned long asid, ver = atomic_long_read(&current_version);

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	if (cntx != 0) {
		unsigned long newcntx = ver | cntx2asid(cntx);

		/*
		 * If our current CONTEXT was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_context(cntx, newcntx))
			return newcntx;

		/*
		 * We had a valid CONTEXT in a previous life, so try to
		 * re-use it if possible.
		 */
		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
			return newcntx;
	}

	/*
	 * Allocate a free ASID. If we can't find one then increment
	 * current_version and flush all ASIDs.
	 */
	asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
	if (asid != num_asids)
		goto set_asid;

	/* We're out of ASIDs, so increment current_version */
	ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);

	/* Flush everything */
	__flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(context_asid_map, num_asids, 1);

set_asid:
	__set_bit(asid, context_asid_map);
	cur_idx = asid;
	return asid | ver;
}

static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	bool need_flush_tlb = false;
	unsigned long cntx, old_active_cntx;

	cntx = atomic_long_read(&mm->context.id);

	/*
	 * If our active_context is non-zero and the context matches the
	 * current_version, then we update the active_context entry with a
	 * relaxed cmpxchg.
	 *
	 * Following is how we handle racing with a concurrent rollover:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated version.
	 *
	 * - We get a valid context back from the cmpxchg, then we continue
	 *   using the old ASID because __flush_context() would have marked
	 *   the ASID of active_context as used, and the next context switch
	 *   will allocate a new context.
	 */
	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
	if (old_active_cntx &&
	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
					old_active_cntx, cntx))
		goto switch_mm_fast;

	raw_spin_lock_irqsave(&context_lock, flags);

	/* Check that our ASID belongs to the current_version. */
	cntx = atomic_long_read(&mm->context.id);
	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
		cntx = __new_context(mm);
		atomic_long_set(&mm->context.id, cntx);
	}

	if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
		need_flush_tlb = true;

	atomic_long_set(&per_cpu(active_context, cpu), cntx);

	raw_spin_unlock_irqrestore(&context_lock, flags);

switch_mm_fast:
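	/*
	 * Program satp with the new root page table PPN, the allocated
	 * hardware ASID in the SATP_ASID_SHIFT field, and the paging mode
	 * bits from satp_mode.
	 */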
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
		  satp_mode);

	if (need_flush_tlb)
		local_flush_tlb_all();
}
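
/*
 * Without the ASID allocator every mm runs under ASID 0, so flushing only
 * ASID 0 here is presumably enough to drop the previous mm's translations
 * while leaving global (kernel) TLB entries intact.
 */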
static void set_mm_noasid(struct mm_struct *mm)
{
	/* Switch the page table and blindly nuke entire local TLB */
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
	local_flush_tlb_all_asid(0);
}

static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
	/*
	 * The mm_cpumask indicates which harts' TLBs contain the virtual
	 * address mappings of the mm. Unlike the noasid case, the asid
	 * mechanism does not flush the TLB on every switch_mm (for
	 * performance), so it cannot guarantee that stale TLB entries are
	 * invalidated. When using asids, therefore, keep every CPU's
	 * footprint in the cpumask until the mm is reset.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));
	if (static_branch_unlikely(&use_asid_allocator)) {
		set_mm_asid(next, cpu);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		set_mm_noasid(next);
	}
}

static int __init asids_init(void)
{
	unsigned long asid_bits, old;

	/* Figure out the number of ASID bits in HW */
	old = csr_read(CSR_SATP);
	asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
	csr_write(CSR_SATP, asid_bits);
	asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
	asid_bits = fls_long(asid_bits);
	csr_write(CSR_SATP, old);
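	/*
	 * The write-then-read sequence above relies on satp.ASID being a WARL
	 * field: unimplemented ASID bits read back as zero, so writing all
	 * ones and counting the bits that stick (fls_long) yields the number
	 * of ASID bits this hart implements.
	 */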

	/*
	 * In the process of determining the number of ASID bits (above)
	 * we polluted the TLB of the current HART, so do a TLB flush
	 * to remove the unwanted TLB entries.
	 */
	local_flush_tlb_all();

	/* Pre-compute ASID details */
	num_asids = 1 << asid_bits;

	/*
	 * Use the ASID allocator only if the number of HW ASIDs is
	 * at least twice the number of CPUs.
	 */
	if (num_asids > (2 * num_possible_cpus())) {
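		/*
		 * With this much headroom a rollover can always succeed:
		 * __flush_context() re-reserves at most one ASID per CPU
		 * plus ASID #0, so __new_context() is guaranteed to find a
		 * free bit afterwards.
		 */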
		atomic_long_set(&current_version, BIT(SATP_ASID_BITS));

		context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
		if (!context_asid_map)
			panic("Failed to allocate bitmap for %lu ASIDs\n",
			      num_asids);

		__set_bit(0, context_asid_map);

		static_branch_enable(&use_asid_allocator);

		pr_info("ASID allocator using %lu bits (%lu entries)\n",
			asid_bits, num_asids);
	} else {
		pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
	}

	return 0;
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
	/* Nothing to do here when there is no MMU */
}
#endif

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shootdowns, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, i.e. 'make -j') we avoid the IPIs for harts that are not currently
 * executing an MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * function actually performs that local instruction cache flush, which
 * implicitly only refers to the current hart.
 *
 * The "cpu" argument must be the current local CPU number.
 */
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu,
					 struct task_struct *task)
{
#ifdef CONFIG_SMP
	if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();

		/*
		 * If the icache will be flushed in switch_to, there is no
		 * need to flush it here.
		 */
		if (!(task && switch_to_should_flush_icache(task)))
			local_flush_icache_all();
	}
#endif
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	membarrier_arch_switch_mm(prev, next, task);

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active.  This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	set_mm(prev, next, cpu);

	flush_icache_deferred(next, cpu, task);
}