/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
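
/*
 * mm->context.id layout: the allocator generation lives in the bits above
 * asid_bits and the hardware ASID in the low asid_bits bits, so comparing
 * the bits above asid_bits ("(id ^ asid_generation) >> asid_bits" below)
 * is enough to tell whether an ASID belongs to the current generation.
 */
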
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);
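	/*
	 * Each CPU will notice the pending flush in check_and_switch_context()
	 * and run local_flush_tlb_all() before installing its next ASID.
	 */
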
	if (icache_is_aivivt())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
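
/*
 * Pick an ASID for @mm under the current generation. Called with
 * cpu_asid_lock held; if the bitmap is exhausted this bumps the generation
 * and rolls over via flush_context().
 */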
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have at least 1 ASID per CPU, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
}
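
/*
 * check_and_switch_context() makes sure @mm has an ASID from the current
 * generation, performs any deferred local TLB flush, and then installs
 * mm->pgd via cpu_switch_mm().
 */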
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;
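
	/*
	 * Slow path: either the generation in mm->context.id is stale or a
	 * concurrent rollover cleared this CPU's active_asids entry. Re-check
	 * under cpu_asid_lock and allocate a new ASID if necessary.
	 */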
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}

static int asids_init(void)
{
	int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
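
	/*
	 * fld is the ASIDBits field of ID_AA64MMFR0_EL1 (bits [7:4]):
	 * 0 means the CPU implements 8-bit ASIDs, 2 means 16-bit ASIDs.
	 */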
194 pr_warn("Unknown ASID size (%d); assuming 8-bit\n", fld
);
203 /* If we end up with more CPUs than ASIDs, expect things to crash */
204 WARN_ON(NUM_USER_ASIDS
< num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);