/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu-info.h>
#include <asm/hw_breakpoint.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>

#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64	(0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
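/*
 * Illustration: TASK_SIZE64 is 2^min(cpu_vabits, VA_BITS), i.e. the 64-bit
 * user address-space size is capped by both the hardware virtual address
 * width and the page-table layout. With example values cpu_vabits = 48 and
 * VA_BITS = 47, TASK_SIZE64 would be 1UL << 47 = 128 TiB.
 */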
#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)

#define FPU_REG_WIDTH		256
#define FPU_ALIGN		__attribute__((aligned(32)))
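/*
 * Illustration: 256 bits is the width of the largest (LASX) vector registers,
 * which share storage with the scalar FP and LSX registers, so one fpureg
 * slot below can hold any of them; FPU_ALIGN keeps the save area 32-byte
 * aligned for full-width accesses.
 */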
union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
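/*
 * Illustration of the generated accessors: BUILD_FPR_ACCESS(64) above expands
 * to roughly
 *
 *	static inline u64 get_fpr64(union fpureg *fpr, unsigned idx)
 *	{
 *		return fpr->val64[FPR_IDX(64, idx)];
 *	}
 *
 *	static inline void set_fpr64(union fpureg *fpr, unsigned int idx, u64 val)
 *	{
 *		fpr->val64[FPR_IDX(64, idx)] = val;
 *	}
 *
 * giving width-specific helpers to read and write one chunk of a saved register.
 */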
struct loongarch_fpu {
	uint64_t	fcc;	/* 8x8 */
	uint32_t	fcsr;
	uint64_t	ftop;
	union fpureg	fpr[NUM_FPU_REGS];
};

struct loongarch_lbt {
	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	/* Eflags register */
	unsigned long eflags;
};

#define INIT_CPUMASK {	\
	{0,}		\
}

#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22; /* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */

	/* __schedule() return address / call frame address */
	unsigned long sched_ra;
	unsigned long sched_cfa;

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	unsigned long single_step; /* Used by PTRACE_SINGLESTEP */
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers, must be at the last of inherited
	 * context because they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt; /* Also conditionally copied */

	/* Hardware breakpoints pinned to this task. */
	struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
	struct perf_event *hbp_watch[LOONGARCH_MAX_WRP];
};

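/*
 * Illustration: at context switch, the callee-saved registers above (ra/sp/fp,
 * s0-s8) are what switch_to() saves for the outgoing task and reloads for the
 * incoming one, while sched_ra/sched_cfa record where __schedule() will
 * return; thread_saved_ra()/thread_saved_fp() below expose the latter, e.g.
 * for __get_wchan() and stack walking.
 */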
#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

#define INIT_THREAD  {						\
	/* Main processor registers */				\
	.reg01 = 0, .reg03 = 0, .reg22 = 0,			\
	.reg23 = 0, .reg24 = 0, .reg25 = 0, .reg26 = 0,		\
	.reg27 = 0, .reg28 = 0, .reg29 = 0, .reg30 = 0,		\
	.reg31 = 0,						\
	.sched_ra = 0, .sched_cfa = 0,				\
	.csr_crmd = 0, .csr_prmd = 0, .csr_euen = 0,		\
	.csr_ecfg = 0, .csr_badvaddr = 0,			\
	/* Other stuff associated with the process */		\
	.trap_nr = 0, .error_code = 0,				\
	/* FPU & vector registers */				\
	.fpu = {						\
		.fcc = 0, .fcsr = 0, .ftop = 0,			\
		.fpr = {{{0,},},},				\
	},							\
	.hbp_break = {0},					\
	.hbp_watch = {0},					\
}

struct task_struct;

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long boot_option_idle_override;

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long __get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
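/*
 * Illustration: a stopped task's user-mode register state lives in the
 * struct pt_regs at the top of its kernel stack, so the saved PC and stack
 * pointer can be read as
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long pc = regs->csr_era;	// same as KSTK_EIP(tsk)
 *	unsigned long sp = regs->regs[3];	// same as KSTK_ESP(tsk)
 */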
/*
 * The "$1" (ra) clobber forces the compiler to save the return address,
 * so __builtin_return_address(0) works even in leaf functions.
 */
#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})

#ifdef CONFIG_CPU_HAS_PREFETCH

/*
 * __builtin_prefetch(addr, rw, locality): rw = 0 prefetches for read,
 * rw = 1 for write; locality 1 means low temporal locality.
 */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif

#endif /* _ASM_PROCESSOR_H */