/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */

enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

#ifdef CONFIG_GENERIC_ENTRY
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif
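
/*
 * Illustrative sketch only (not part of this header): the BIT() masks above
 * are for testing a snapshot of ->syscall_work in bulk, while the
 * SYSCALL_WORK_BIT_* numbers feed the atomic bitops used by the accessor
 * macros further down.  A syscall-entry path might do roughly:
 *
 *	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
 *
 *	if (work & SYSCALL_WORK_SECCOMP)
 *		handle_seccomp();
 *
 * handle_seccomp() is a hypothetical stand-in for the real entry-code hook.
 */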

#include <asm/thread_info.h>

#ifndef TIF_NEED_RESCHED_LAZY
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
#error Inconsistent PREEMPT_LAZY
#endif
#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
#endif

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
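
/*
 * Illustrative sketch only: a restartable syscall stashes its state in
 * current->restart_block and returns through set_restart_fn(), e.g. a
 * nanosleep-style implementation (the callback name is hypothetical):
 *
 *	struct restart_block *restart = &current->restart_block;
 *
 *	restart->nanosleep.expires = expires;
 *	return set_restart_fn(restart, my_nanosleep_restart);
 *
 * The signal code then re-invokes my_nanosleep_restart() via
 * sys_restart_syscall() instead of replaying the original syscall.
 */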

#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
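
/*
 * Illustrative sketch only: callers pass TIF_xxxx constants from
 * <asm/thread_info.h>.  The _ti_ variants take an explicit thread_info,
 * the shorter forms act on the current task, e.g.:
 *
 *	set_ti_thread_flag(task_thread_info(p), TIF_NEED_RESCHED);
 *
 *	if (test_and_clear_thread_flag(TIF_NEED_RESCHED))
 *		schedule();
 *
 * Real users (e.g. the scheduler) wrap these further; the calls above only
 * show the calling convention.
 */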

#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

#define set_syscall_work(fl) \
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
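
/*
 * Illustrative sketch only: callers pass the bare suffix and the macros
 * paste it onto SYSCALL_WORK_BIT_* or TIF_*, depending on
 * CONFIG_GENERIC_ENTRY.  Enabling and honouring seccomp filtering might
 * look roughly like:
 *
 *	set_task_syscall_work(task, SECCOMP);
 *	...
 *	if (test_syscall_work(SECCOMP))
 *		secure_computing();
 *
 * The surrounding control flow is a sketch; only the macro invocations
 * illustrate the intended usage.
 */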

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

/* Prefer the uninstrumented arch_test_bit() when instrumented bitops are in use. */
static __always_inline bool tif_test_bit(int bit)
{
	return arch_test_bit(bit,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_test_bit(int bit)
{
	return test_bit(bit,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

static __always_inline bool tif_need_resched(void)
{
	return tif_test_bit(TIF_NEED_RESCHED);
}
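
/*
 * Illustrative sketch only: tif_need_resched() is the raw predicate behind
 * need_resched()-style checks, e.g. in a polling loop:
 *
 *	while (!tif_need_resched())
 *		cpu_relax();
 *	schedule();
 */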

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
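
/*
 * Illustrative sketch only: the uaccess fast paths hand the kernel-side
 * buffer to check_object_size() before the raw copy, roughly:
 *
 *	check_object_size(from, n, true);
 *	return raw_copy_to_user(to, from, n);
 *
 * With CONFIG_HARDENED_USERCOPY=n, or with a compile-time-constant size,
 * this compiles away entirely.
 */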

extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}

static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);

	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
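
/*
 * Illustrative sketch only: this is roughly how the generic copy_to_user()
 * wrapper in <linux/uaccess.h> gates the real copy on check_copy_size():
 *
 *	static __always_inline unsigned long __must_check
 *	copy_to_user(void __user *to, const void *from, unsigned long n)
 *	{
 *		if (check_copy_size(from, n, true))
 *			n = _copy_to_user(to, from, n);
 *		return n;
 *	}
 *
 * On failure nothing is copied and the full byte count is returned, matching
 * the usual copy_*_user() error convention.
 */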

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

void arch_task_cache_init(void); /* for CONFIG_SH */
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src);

#endif	/* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */