// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;

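/*
 * Synchronise the tags of a single page: restore them from swap if a saved
 * copy exists, otherwise reset the KASAN page tag and clear the tags in
 * memory.
 */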
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
	pte_t old_pte = READ_ONCE(*ptep);

	if (check_swap && is_swap_pte(old_pte)) {
		swp_entry_t entry = pte_to_swp_entry(old_pte);

		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
			return;
	}

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_clear_page_tags(page_address(page));
}

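/*
 * Initialise the tags of the page(s) backing a tagged pte. PG_mte_tagged
 * ensures the tags are only initialised once per page; compound pages are
 * walked one subpage at a time.
 */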
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long i, nr_pages = compound_nr(page);
	bool check_swap = nr_pages == 1;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_sync_page_tags(page, ptep, check_swap);
	}
}

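/*
 * arm64 override of the generic memcmp_pages(), used by KSM to decide
 * whether two pages can be merged.
 */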
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (test_bit(PG_mte_tagged, &page1->flags) ||
	    test_bit(PG_mte_tagged, &page2->flags))
		return addr1 != addr2;

	return ret;
}

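/*
 * The helpers below (mte_get_mem_tag(), mte_get_random_tag() and
 * mte_set_mem_tag_range()) implement the in-kernel tag manipulation
 * interface declared in <asm/mte-kasan.h>, used by hardware tag-based KASAN.
 */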
u8 mte_get_mem_tag(void *addr)
{
	if (!system_supports_mte())
		return 0xFF;

	asm(__MTE_PREAMBLE "ldg %0, [%0]"
	    : "+r" (addr));

	return mte_get_ptr_tag(addr);
}

u8 mte_get_random_tag(void)
{
	void *addr;

	if (!system_supports_mte())
		return 0xFF;

	asm(__MTE_PREAMBLE "irg %0, %0"
	    : "+r" (addr));

	return mte_get_ptr_tag(addr);
}

void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
	void *ptr = addr;

	if ((!system_supports_mte()) || (size == 0))
		return addr;

	/* Make sure that size is MTE granule aligned. */
	WARN_ON(size & (MTE_GRANULE_SIZE - 1));

	/* Make sure that the address is MTE granule aligned. */
	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));

	tag = 0xF0 | tag;
	ptr = (void *)__tag_set(ptr, tag);

	mte_assign_mem_tag_range(ptr, size);

	return ptr;
}

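/*
 * Derive the kernel GCR_EL1 exclude mask from the maximum tag that KASAN
 * will use and program GCR_EL1 so that IRG only generates tags from the
 * included set.
 */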
void mte_init_tags(u64 max_tag)
{
	static bool gcr_kernel_excl_initialized;

	if (!gcr_kernel_excl_initialized) {
		/*
		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
		 * This conversion extracts an MTE tag from a KASAN tag.
		 */
		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
					     max_tag), 0);

		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
		gcr_kernel_excl_initialized = true;
	}

	/* Enable the kernel exclude mask for random tags generation. */
	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}

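/*
 * Switch the kernel (EL1) to synchronous tag check faults; part of the
 * <asm/mte-kasan.h> interface used by hardware tag-based KASAN.
 */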
void mte_enable_kernel(void)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
	isb();
}

static void update_sctlr_el1_tcf0(u64 tcf0)
{
	/* ISB required for the kernel uaccess routines */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
	isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
	/*
	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
	 * optimisation. Disable preemption so that it does not see
	 * the variable update before the SCTLR_EL1.TCF0 one.
	 */
	preempt_disable();
	current->thread.sctlr_tcf0 = tcf0;
	update_sctlr_el1_tcf0(tcf0);
	preempt_enable();
}

static void update_gcr_el1_excl(u64 excl)
{
	/*
	 * Note that the mask controlled by the user via prctl() is an
	 * include while GCR_EL1 accepts an exclude mask.
	 * No need for ISB since this only affects EL0 currently, implicit
	 * with ERET.
	 */
	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 excl)
{
	current->thread.gcr_user_excl = excl;

	/*
	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
	 * by mte_set_user_gcr() in kernel_exit.
	 */
}

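/*
 * Reset the MTE state of the current task: drop any pending asynchronous tag
 * fault, disable EL0 tag checking and restore the default tag generation
 * mask (called when the thread state is flushed, e.g. on exec).
 */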
void flush_mte_state(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking */
	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
	/* reset tag generation mask */
	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}

void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
}

void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	update_gcr_el1_excl(gcr_kernel_excl);
}

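/*
 * Configure a task's MTE tag check fault mode and tag generation mask from
 * the PR_MTE_* bits of a prctl(PR_SET_TAGGED_ADDR_CTRL) argument.
 */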
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 tcf0;
	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
		       SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	switch (arg & PR_MTE_TCF_MASK) {
	case PR_MTE_TCF_NONE:
		tcf0 = SCTLR_EL1_TCF0_NONE;
		break;
	case PR_MTE_TCF_SYNC:
		tcf0 = SCTLR_EL1_TCF0_SYNC;
		break;
	case PR_MTE_TCF_ASYNC:
		tcf0 = SCTLR_EL1_TCF0_ASYNC;
		break;
	default:
		return -EINVAL;
	}

	if (task != current) {
		task->thread.sctlr_tcf0 = tcf0;
		task->thread.gcr_user_excl = gcr_excl;
	} else {
		set_sctlr_el1_tcf0(tcf0);
		set_gcr_el1_excl(gcr_excl);
	}

	return 0;
}

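/*
 * Report a task's MTE configuration in the prctl(PR_GET_TAGGED_ADDR_CTRL)
 * format: the tag check fault mode plus the included tag set shifted into
 * the PR_MTE_TAG_MASK field.
 */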
long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;

	switch (task->thread.sctlr_tcf0) {
	case SCTLR_EL1_TCF0_NONE:
		ret |= PR_MTE_TCF_NONE;
		break;
	case SCTLR_EL1_TCF0_SYNC:
		ret |= PR_MTE_TCF_SYNC;
		break;
	case SCTLR_EL1_TCF0_ASYNC:
		ret |= PR_MTE_TCF_ASYNC;
		break;
	}

	return ret;
}

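/*
 * Illustrative only, not part of this file's interface: a minimal userspace
 * sketch of how the prctl() backend above is typically driven, assuming a
 * <linux/prctl.h> that defines the PR_MTE_* constants. It enables tagged
 * addresses, synchronous tag check faults and the full set of tags for IRG:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int enable_mte_sync(void)
 *	{
 *		unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *				     (0xffffUL << PR_MTE_TAG_SHIFT);
 *
 *		return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
 *	}
 */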
/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int ret;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		unsigned long tags, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
					    &vma, NULL);
		if (ret <= 0)
			break;

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			ret = -EOPNOTSUPP;
			put_page(page);
			break;
		}

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (ret <= 0)
			return ret;
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}

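/*
 * ptrace() backend for PTRACE_PEEKMTETAGS and PTRACE_POKEMTETAGS: 'data'
 * points to a struct iovec in the tracer describing the tag buffer, and
 * iov_len is updated with the number of tags actually copied.
 */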
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}
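
/*
 * Illustrative only, not part of this file: a minimal sketch of how a tracer
 * would read the tags of one MTE granule in a stopped tracee, assuming the
 * toolchain headers expose PTRACE_PEEKMTETAGS. One byte is transferred per
 * 16-byte granule, with the tag in the low 4 bits.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	static long peek_one_tag(pid_t pid, void *addr, unsigned char *tag)
 *	{
 *		struct iovec iov = { .iov_base = tag, .iov_len = 1 };
 *
 *		return ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 *	}
 */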