/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/book3s/32/mmu-hash.h>

#ifdef __ASSEMBLY__
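
/*
 * Loop over the user segment registers: write the value in gpr1 into the
 * segment register selected by gpr2, then step to the next VSID and the
 * next 256MB segment. CTR holds the number of segments left to update
 * (set up by kuep_lock/kuep_unlock below).
 */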
.macro kuep_update_sr	gpr1, gpr2		/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	bdnz	101b
	isync
.endm
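
/*
 * kuep_lock/kuep_unlock: set or clear the No-execute (Nx) bit in all user
 * segment registers on kernel entry/exit, so user pages cannot be executed
 * while running in the kernel (Kernel Userspace Execution Prevention).
 */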
.macro kuep_lock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_NX@h	/* set Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

.macro kuep_unlock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_NX	/* Clear Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

#ifdef CONFIG_PPC_KUAP
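
/*
 * Write the SR value in gpr1 into the segment registers covering the range
 * from the address in gpr2 up to the end address in gpr3, stepping the VSID
 * for each 256MB segment.
 */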
.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	cmplw	\gpr2, \gpr3
	blt-	101b
	isync
.endm
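
/*
 * On kernel entry: save the current KUAP state into the exception frame,
 * clear thread.kuap and, if a user access window was open, set the Ks
 * (supervisor key) bit in the segment registers covering it again.
 */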
.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
	lwz	\gpr2, KUAP(\thread)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, STACK_REGS_KUAP(\sp)
	beq+	102f
	li	\gpr1, 0
	stw	\gpr1, KUAP(\thread)
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
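
/*
 * On kernel exit: restore the KUAP state saved on the stack into
 * thread.kuap and, if a user access window was open, clear Ks again so the
 * interrupted user access can continue.
 */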
.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
	lwz	\gpr2, STACK_REGS_KUAP(\sp)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, THREAD + KUAP(\current)
	beq+	102f
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
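
/*
 * Debug helper: warn once if thread.kuap is not zero at this point,
 * i.e. if a user access window was left open.
 */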
.macro kuap_check	current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
	lwz	\gpr, THREAD + KUAP(\current)
999:	twnei	\gpr, 0
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#endif /* CONFIG_PPC_KUAP */

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>
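
/*
 * Write 'sr' into every segment register covering [addr, end), bumping the
 * VSID for each 256MB segment. Callers update thread.kuap first; barrier()
 * keeps that store ordered before the mtsrin()s.
 */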
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	addr &= 0xf0000000;	/* align addr to start of segment */
	barrier();	/* make sure thread.kuap is updated before playing with SRs */
	while (addr < end) {
		mtsrin(sr, addr);
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* address of next segment */
	}
	isync();	/* Context sync required after mtsrin() */
}
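
/*
 * Open a user access window: record the allowed range in thread.kuap
 * (segment-aligned start address in the high bits, exclusive end segment
 * number in the low nibble) and clear Ks in the segment registers covering it.
 */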
static inline void allow_user_access(void __user *to, const void __user *from,
				     u32 size)
{
	u32 addr, end;

	if (__builtin_constant_p(to) && to == NULL)
		return;

	addr = (__force u32)to;

	if (!addr || addr >= TASK_SIZE || !size)
		return;

	end = min(addr + size, TASK_SIZE);
	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}
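
/*
 * Close the user access window: clear thread.kuap and set Ks again in the
 * segment registers that were opened by allow_user_access().
 */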
static inline void prevent_user_access(void __user *to, const void __user *from,
				       u32 size)
{
	u32 addr = (__force u32)to;
	u32 end = min(addr + size, TASK_SIZE);

	if (!addr || addr >= TASK_SIZE || !size)
		return;

	current->thread.kuap = 0;
	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
}
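
/*
 * Called from the fault handler: a write fault while no user access window
 * is open (regs->kuap == 0) means KUAP blocked the access; warn and report
 * the fault as bad.
 */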
static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
{
	if (!is_write)
		return false;

	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */