// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>	/* struct asid_info, active_asid(), NUM_CTXT_ASIDS() */
#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
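/*
 * Called on rollover, with info->lock held: reset the ASID bitmap, but keep
 * the ASID that is live (or was last used) on each CPU marked as in use, and
 * queue a TLB flush on every CPU for the next context switch.
 */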
static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&info->flush_pending);
}
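/*
 * Returns true if @asid is found among the per-CPU reserved ASIDs; every
 * reserved copy of it is rewritten to @newasid along the way.
 */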
static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}
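/*
 * Slow path: allocate an ASID for @mm, stamped with the current generation.
 * Called with info->lock held; may trigger a rollover via flush_context()
 * when the bitmap is exhausted.
 */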
static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}
/*
 * Generate a new ASID for the context.
 *
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}
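/*
 * asid_new_context() is the slow path. A caller typically pairs it with a
 * lock-free fast path in its context-switch code, roughly along these lines
 * (a sketch only; everything except asid_new_context() and the asid_info
 * accessors is illustrative):
 *
 *	asid = atomic64_read(pasid);
 *	old_active = atomic64_read(&active_asid(info, cpu));
 *	if (old_active &&
 *	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
 *	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu), old_active, asid))
 *		return;
 *	asid_new_context(info, pasid, cpu, mm);
 *
 * The cmpxchg can only fail if a concurrent rollover cleared active_asid(),
 * in which case the slow path revalidates the generation under info->lock.
 */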
/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of ASIDs available
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
			    sizeof(*info->map), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}
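/*
 * Typical setup (a sketch only: the caller is expected to provide the per-CPU
 * active/reserved counters referenced by struct asid_info, and the names
 * below are illustrative, not part of this file):
 *
 *	if (asid_allocator_init(&my_asid_info, hw_asid_bits, 1,
 *				my_local_tlb_flush))
 *		panic("Failed to initialize ASID allocator");
 */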