/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <linux/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
/*
 * Provide a wxN alias for each wN register so that we can paste an xN
 * reference after a 'w' to obtain the 32-bit version.
 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr
	.macro	disable_daif
	msr	daifset, #0xf
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Take effect before a subsequent clear of DAIF.D
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro	clearbhb
	hint	#22
	.endm
/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm
/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
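/*
 * Illustrative use (register choices are arbitrary): combine a 64-bit
 * value returned as two 32-bit halves, low half in w0 and high half in
 * w1, into a single register; the argument order flips automatically
 * on big endian builds:
 *
 *	regs_to_64	x2, w0, w1	// x2 = ((u64)w1 << 32) | w0 on LE
 */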
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm
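	/*
	 * Illustrative use (register choice is arbitrary): take the
	 * runtime address of a symbol that may be further away than the
	 * +/- 1 MB reach of a plain 'adr':
	 *
	 *	adr_l	x0, init_task		// x0 = &init_task
	 */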
	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm
	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
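	/*
	 * Illustrative use (register choices are arbitrary, and
	 * 'some_u32_var' stands for any global defined elsewhere): load
	 * a 32-bit variable through a 64-bit scratch register, then
	 * store it back:
	 *
	 *	ldr_l	w1, some_u32_var, x2	// w1 = some_u32_var, x2 clobbered
	 *	str_l	w1, some_u32_var, x2	// some_u32_var = w1
	 */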
	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm

	.macro	set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, \src
alternative_else
	msr	tpidr_el2, \src
alternative_endif
	.endm
#endif
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	get_this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm
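	/*
	 * Illustrative use (names are arbitrary): read this CPU's copy
	 * of a 64-bit per-cpu variable declared elsewhere with
	 * DEFINE_PER_CPU(), using x1 as scratch:
	 *
	 *	ldr_this_cpu	x0, my_percpu_counter, x1
	 */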
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm
/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
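/*
 * Worked example of the decoding above: CTR_EL0.DminLine (bits [19:16])
 * holds log2 of the line size in words. With DminLine = 4, the result
 * is 4 bytes/word << 4 = 64 bytes, the common D-cache line size on
 * current cores.
 */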
/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm
/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
	.macro	__dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \addr
alternative_else
	dc	civac, \addr
alternative_endif
	.endm
/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end) with dcache line size explicitly provided.
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	start:		starting virtual address of the region
 *	end:		end virtual address of the region
 *	linesz:		dcache line size
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:	start, end, tmp
 */
	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
	sub	\tmp, \linesz, #1
	bic	\start, \start, \tmp
.Ldcache_op\@:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \start	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \start	// dc cvadp
	.else
	dc	\op, \start
	.endif
	.endif
	.endif
	.endif
	add	\start, \start, \linesz
	cmp	\start, \end
	b.lo	.Ldcache_op\@
	dsb	\domain

	_cond_uaccess_extable .Ldcache_op\@, \fixup
	.endm
/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	start:		starting virtual address of the region
 *	end:		end virtual address of the region
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:	start, end, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
	.endm
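/*
 * Illustrative use (register choices are arbitrary): clean and
 * invalidate to the point of coherency a buffer whose start and end
 * virtual addresses are in x0 and x1, with no user-fault fixup label:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */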
/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
.Licache_op\@:
	ic	ivau, \tmp2			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	.Licache_op\@
	dsb	ish
	isb

	_cond_uaccess_extable .Licache_op\@, \fixup
	.endm
/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
	.macro	load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr \tmp1, \pgtbl
	offset_ttbr1 \tmp1, \tmp2
	msr	ttbr1_el1, \tmp1
	isb
	.endm
/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the TLB, switch the TTBR to a zero page when we invalidate the old
 * records (see D4.7.1 'General TLB maintenance requirements' in ARM DDI
 * 0487A.i). Even switching to our copied tables will cause a changed output
 * address at each stage of the walk.
 */
	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	tlbi	vmalle1
	dsb	nsh
	load_ttbr1 \page_table, \tmp, \tmp2
	.endm
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	ubfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	9000f				// Skip if no PMU present or IMP_DEF
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
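/*
 * Illustrative use (register choices are arbitrary): copy the
 * page-aligned page at the address in x1 to the page at the address in
 * x0; both pointers are advanced past the page by the macro:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */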
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
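/*
 * Illustrative use (the constants are chosen only to show the
 * expansion): a value that fits in 32 bits, such as 0x0123abcd, expands
 * to two instructions (movz + movk), while 0xffff0000deadbeef needs all
 * four (movz + 3x movk):
 *
 *	mov_q	x0, 0x0123abcd
 *	mov_q	x1, 0xffff0000deadbeef
 */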
/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm
/*
 * If the kernel is built for 52-bit virtual addressing but the hardware only
 * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
 * but we have to add an offset so that the TTBR1 address corresponds with the
 * pgdir entry that covers the lowest 48-bit addressable VA.
 *
 * Note that this trick is only used for LVA/64k pages - LPA2/4k pages use an
 * additional paging level, and on LPA2/16k pages, we would end up with a root
 * level table with only 2 entries, which is suboptimal in terms of TLB
 * utilization, so there we fall back to 47 bits of translation if LPA2 is not
 * supported.
 *
 * orr is used as it can cover the immediate value (and is idempotent).
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
	mrs	\tmp, tcr_el1
	and	\tmp, \tmp, #TCR_T1SZ_MASK
	cmp	\tmp, #TCR_T1SZ(VA_BITS_MIN)
	orr	\tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
	csel	\ttbr, \tmp, \ttbr, eq
#endif
	.endm
/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm
/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm
/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of
 * 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm
/*
 * frame_push - Push @regcount callee saved registers to the stack,
 *              starting at x19, as well as x29/x30, and set x29 to
 *              the new value of sp. Add @extra bytes of stack space
 *              for locals.
 */
	.macro	frame_push, regcount:req, extra
	__frame	st, \regcount, \extra
	.endm

/*
 * frame_pop  - Pop the callee saved registers from the stack that were
 *              pushed in the most recent call to frame_push, as well
 *              as x29/x30 and any extra stack space that may have been
 *              allocated.
 */
	.macro	frame_pop
	__frame	ld
	.endm
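/*
 * Illustrative use (the function name, register count and local size
 * are arbitrary): a routine that needs x19-x22 preserved plus 16 bytes
 * of locals:
 *
 * SYM_FUNC_START(my_asm_helper)
 *	frame_push	4, 16		// saves x19-x22, x29, x30
 *	// ... body ...
 *	frame_pop
 *	ret
 * SYM_FUNC_END(my_asm_helper)
 */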
	.macro	__frame_regs, reg1, reg2, op, num
	.if	.Lframe_regcount == \num
	\op\()r	\reg1, [sp, #(\num + 1) * 8]
	.elseif	.Lframe_regcount > \num
	\op\()p	\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro	__frame, op, regcount, extra=0
	.ifc	\op, st
	.if	(\regcount) < 0 || (\regcount) > 10
	.error	"regcount should be in the range [0 ... 10]"
	.endif
	.if	((\extra) % 16) != 0
	.error	"extra should be a multiple of 16 bytes"
	.endif
	.ifdef	.Lframe_regcount
	.if	.Lframe_regcount != -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set	.Lframe_regcount, \regcount
	.set	.Lframe_extra, \extra
	.set	.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp	x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov	x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc	\op, ld
	.if	.Lframe_regcount == -1
	.error	"frame_push/frame_pop may not be nested"
	.endif
	ldp	x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set	.Lframe_regcount, -1
	.endif
	.endm
/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
	.macro	set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	.endm

	.macro	set_sctlr_el1, reg
	set_sctlr	sctlr_el1, \reg
	.endm

	.macro	set_sctlr_el2, reg
	set_sctlr	sctlr_el2, \reg
	.endm
/*
 * Check whether asm code should yield as soon as it is able. This is
 * the case if we are currently running in task context, and the
 * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
 * is stored negated in the top word of the thread_info::preempt_count
 * field)
 */
	.macro	cond_yield, lbl:req, tmp:req, tmp2
#ifdef CONFIG_PREEMPT_VOLUNTARY
	get_current_task \tmp
	ldr	\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can. The preempt_count field will
	 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
	 * catch this case too.
	 */
	cbz	\tmp, \lbl
#endif
	.endm
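/*
 * Illustrative use (label and register choices are arbitrary), typical
 * of long-running crypto loops: branch out to a resched point when the
 * check above says we should yield, and fall through otherwise:
 *
 *	cond_yield	3f, x8, x9
 *	// ... keep processing ...
 * 3:	// save state, yield, then resume
 */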
/*
 * Branch Target Identifier (BTI)
 */
	.macro	bti, targets
	.equ	.L__bti_targets_c, 34
	.equ	.L__bti_targets_j, 36
	.equ	.L__bti_targets_jc, 38
	hint	#.L__bti_targets_\targets
	.endm
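	/*
	 * Illustrative use: code that may be reached via an indirect
	 * branch ('blr') must start with a branch-target landing pad:
	 *
	 *	bti	c		// expands to 'hint #34', i.e. BTI C
	 */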
/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */
#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align	3
	.long	2f - 1f
	.long	6f - 3f
	.long	NT_GNU_PROPERTY_TYPE_0
1:	.string	"GNU"
2:
	.align	3
3:	.long	GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long	5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long	\feat
5:
	.align	3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
	/* Saves/restores x0-x3 on the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

#endif /* __ASM_ASSEMBLER_H */