#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
	.text
/*
 * Implementation of MPIDR_EL1 hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @rs3: register containing affinity level 3 bit shift
 * @mpidr: register containing MPIDR_EL1 value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C code:
 *
 *u32 dst;
 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
 *	u32 aff0, aff1, aff2, aff3;
 *	u64 mpidr_masked = mpidr & mask;
 *	aff0 = mpidr_masked & 0xff;
 *	aff1 = mpidr_masked & 0xff00;
 *	aff2 = mpidr_masked & 0xff0000;
 *	aff3 = mpidr_masked & 0xff00000000;
 *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
 *}
 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
 *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
 */
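/*
 * Worked example with hypothetical values (not taken from a real
 * mpidr_hash instance): assume mask = 0x303, rs0 = 0, rs1 = 6.
 * For mpidr = 0x201:
 *	aff0 = 0x201 & 0xff   = 0x001; aff0 >> 0 = 0x1
 *	aff1 = 0x201 & 0xff00 = 0x200; aff1 >> 6 = 0x8
 *	dst  = 0x1 | 0x8 = 0x9
 * ie the sparse affinity bit fields are packed into a small dense index.
 */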
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
	and	\dst, \mpidr, #0xff		// mask = aff0
	lsr	\dst, \dst, \rs0		// dst = aff0 >> rs0
	and	\mask, \mpidr, #0xff00		// mask = aff1
	lsr	\mask, \mask, \rs1
	orr	\dst, \dst, \mask		// dst |= (aff1 >> rs1)
	and	\mask, \mpidr, #0xff0000	// mask = aff2
	lsr	\mask, \mask, \rs2
	orr	\dst, \dst, \mask		// dst |= (aff2 >> rs2)
	and	\mask, \mpidr, #0xff00000000	// mask = aff3
	lsr	\mask, \mask, \rs3
	orr	\dst, \dst, \mask		// dst |= (aff3 >> rs3)
	.endm
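/*
 * Note also that \mpidr and \mask are used as scratch registers and are
 * clobbered, which is why the instantiations below (eg
 * compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10) pass them in registers
 * that are dead once the hash has been computed.
 */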
/*
 * Save CPU state for a suspend and execute the suspend finisher.
 * On success it will return 0 through cpu_resume - ie through a CPU
 * soft/hard reboot from the reset vector.
 * On failure it returns the suspend finisher return value, or forces
 * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
 * is not allowed to return; if it does, this must be considered a failure).
 * It saves callee-saved registers, and allocates space on the kernel stack
 * to save the CPU specific registers + some other data for resume.
 *
 *  x0 = suspend finisher argument
 *  x1 = suspend finisher function pointer
 */
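/*
 * C-level sketch of the contract described above (pseudo code only,
 * not the actual declaration used by callers):
 *
 *	int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long))
 *	{
 *		save callee-saved regs and a cpu_suspend_ctx on the stack;
 *		ret = fn(arg);		// must not return on success
 *		return ret ? ret : -EOPNOTSUPP;
 *	}
 */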
ENTRY(__cpu_suspend_enter)
	stp	x29, lr, [sp, #-96]!
	stp	x19, x20, [sp, #16]
	stp	x21, x22, [sp, #32]
	stp	x23, x24, [sp, #48]
	stp	x25, x26, [sp, #64]
	stp	x27, x28, [sp, #80]
	/*
	 * Stash suspend finisher and its argument in x20 and x19
	 */
	mov	x19, x0
	mov	x20, x1
	mov	x2, sp
	sub	sp, sp, #CPU_SUSPEND_SZ	// allocate cpu_suspend_ctx
	mov	x0, sp
	/*
	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
	 */
	str	x2, [x0, #CPU_CTX_SP]	// stash pre-allocation sp in the ctx
	ldr	x1, =sleep_save_sp
	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
	mrs	x7, mpidr_el1
	ldr	x9, =mpidr_hash
	ldr	x10, [x9, #MPIDR_HASH_MASK]
	/*
	 * Following code relies on the struct mpidr_hash
	 * members size and ordering.
	 */
	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
	add	x1, x1, x8, lsl #3	// x1 = sleep save slot for this cpu
	bl	__cpu_suspend_save
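	/*
	 * Rough C equivalent of the indexing above, assuming the struct
	 * sleep_save_sp layout with 8-byte save_ptr_stash entries:
	 *
	 *	__cpu_suspend_save(ctx, &sleep_save_sp.save_ptr_stash[hash]);
	 */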
	/*
	 * Grab suspend finisher in x20 and its argument in x19
	 */
	mov	x0, x19
	mov	x1, x20
	/*
	 * We are ready for power down, fire off the suspend finisher
	 * in x1, with argument in x0
	 */
	blr	x1
	/*
	 * Never gets here, unless the suspend finisher fails.
	 * A successful cpu_suspend() should return from cpu_resume; returning
	 * through this code path is considered an error.
	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
	 * to make sure a proper error condition is propagated
	 */
	cmp	x0, #0
	mov	x3, #-EOPNOTSUPP
	csel	x0, x3, x0, eq
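	/*
	 * In C terms the error path above is simply (sketch, matching the
	 * contract documented at the top of this function):
	 *
	 *	return ret ? ret : -EOPNOTSUPP;
	 */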
	add	sp, sp, #CPU_SUSPEND_SZ	// rewind stack pointer
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96
	ret
ENDPROC(__cpu_suspend_enter)
	.ltorg
/*
 * x0 must contain the sctlr value retrieved from restored context
 */
	.pushsection	".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
	ldr	x3, =cpu_resume_after_mmu
	msr	sctlr_el1, x0		// restore sctlr_el1
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	br	x3			// global jump to virtual address
ENDPROC(cpu_resume_mmu)
	.popsection
cpu_resume_after_mmu:
	mov	x0, #0			// return zero on success
	ldp	x19, x20, [sp, #16]
	ldp	x21, x22, [sp, #32]
	ldp	x23, x24, [sp, #48]
	ldp	x25, x26, [sp, #64]
	ldp	x27, x28, [sp, #80]
	ldp	x29, lr, [sp], #96
	ret
ENDPROC(cpu_resume_after_mmu)
ENTRY(cpu_resume)
	bl	el2_setup		// if in EL2 drop to EL1 cleanly
	mrs	x1, mpidr_el1
	adrp	x8, mpidr_hash
	add	x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
	/* retrieve mpidr_hash members to compute the hash */
	ldr	x2, [x8, #MPIDR_HASH_MASK]
	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
	/* x7 contains hash index, let's use it to grab context pointer */
	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
	ldr	x0, [x0, x7, lsl #3]
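	/*
	 * Rough C equivalent of the two loads above (assuming the struct
	 * sleep_save_sp layout with 8-byte entries); the _PHYS pointer is
	 * used because the MMU is still off at this point:
	 *
	 *	ctx = ((phys_addr_t *)sleep_save_sp.save_ptr_stash_phys)[hash];
	 */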
	/* load sp from context */
	ldr	x2, [x0, #CPU_CTX_SP]
	/* load physical address of identity map page table in x1 */
	adrp	x1, idmap_pg_dir
	mov	sp, x2
	/*
	 * cpu_do_resume expects x0 to contain the context physical
	 * address and x1 the physical address of the 1:1 page tables
	 */
	bl	cpu_do_resume		// PC relative jump, MMU off
	b	cpu_resume_mmu		// Resume MMU, never returns
ENDPROC(cpu_resume)