revert between 56095 -> 55830 in arch
[AROS.git] / arch / all-pc / exec / exec_platform.h
blobbc0299dc9e19462ecc8899a7bbd497d2793a72a2
1 /*
2 Copyright © 2017, The AROS Development Team. All rights reserved.
3 $Id$
4 */
5 #ifndef __EXEC_PLATFORM_H
6 #define __EXEC_PLATFORM_H
8 // needed to determine if this is an smp build
9 #include <aros/config.h>
10 #include <aros/atomic.h>
12 #ifndef __KERNEL_NOLIBBASE__
13 #define __KERNEL_NOLIBBASE__
14 #endif /* !__KERNEL_NOLIBBASE__ */
15 #include <proto/kernel.h>
/*
 * NOTE(review): on 64-bit builds RemTask() apparently needs to force a
 * scheduler switch — presumably so the task's resources are freed from a
 * safe context. TODO: confirm against the RemTask() implementation.
 */
#if (__WORDSIZE==64)
#define EXEC_REMTASK_NEEDSSWITCH
#endif

/* Scheduler tuning: timeslice quantum and granularity, per build type. */
#if defined (__AROSEXEC_SMP__)
#define SCHEDQUANTUM_VALUE      10
#define SCHEDGRAN_VALUE         1
#else
#define SCHEDQUANTUM_VALUE      4
#define SCHEDGRAN_VALUE         1
#endif
28 #include "kernel_base.h"
30 #if defined(__AROSEXEC_SMP__)
31 #include "kernel_intern.h"
32 #include <aros/types/spinlock_s.h>
33 #include <utility/hooks.h>
35 #include "tls.h"
36 #include "etask.h"
38 /* special flag to get the scheduling code to unspin tasks */
39 #define TS_UNSPIN 0x10
41 extern struct Hook Exec_TaskSpinLockFailHook;
42 extern struct Hook Exec_TaskSpinLockForbidHook;
43 extern struct Hook Exec_TaskSpinLockDisableHook;
44 extern void Exec_TaskSpinUnlock(spinlock_t *);
46 struct ExecSpinSCData
48 spinlock_t *lock_ptr;
49 struct Hook *lock_obtainhook;
50 struct Hook *lock_failhook;
51 ULONG lock_mode;
54 struct Exec_PlatformData
56 spinlock_t *(*SpinLockCall)(spinlock_t *, struct Hook *, struct Hook *, ULONG);
59 #ifndef __KERNEL_NO_SPINLOCK_PROTOS__
60 extern void Kernel_49_KrnSpinInit(spinlock_t *, void *);
61 extern spinlock_t *Kernel_52_KrnSpinLock(spinlock_t *, struct Hook *, ULONG, void *);
62 extern void Kernel_53_KrnSpinUnLock(spinlock_t *, void *);
63 #endif
65 #define EXEC_SPINLOCK_INIT(a) Kernel_49_KrnSpinInit((a), NULL)
66 #define EXEC_SPINLOCK_LOCK(a,b,c) Kernel_52_KrnSpinLock((a), (b), (c), NULL)
67 #define EXEC_SPINLOCK_UNLOCK(a) Kernel_53_KrnSpinUnLock((a), NULL)
#if defined(AROS_NO_ATOMIC_OPERATIONS)
/*
 * Non-atomic variants: each macro looks up this CPU's scheduling data in
 * TLS and, if present, updates the relevant field with a plain
 * read-modify-write. All macros are no-ops when the TLS slot is NULL
 * (e.g. before the scheduler is initialised).
 */
#define IDNESTCOUNT_INC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->IDNestCnt++; \
    } while(0)
#define IDNESTCOUNT_DEC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->IDNestCnt--; \
    } while(0)
#define TDNESTCOUNT_INC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->TDNestCnt++; \
    } while(0)
#define TDNESTCOUNT_DEC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->TDNestCnt--; \
    } while(0)
#define FLAG_SCHEDQUANTUM_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags &= ~TLSSF_Quantum; \
    } while(0)
#define FLAG_SCHEDQUANTUM_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags |= TLSSF_Quantum; \
    } while(0)
#define FLAG_SCHEDSWITCH_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags &= ~TLSSF_Switch; \
    } while(0)
#define FLAG_SCHEDSWITCH_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags |= TLSSF_Switch; \
    } while(0)
#define FLAG_SCHEDDISPATCH_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags &= ~TLSSF_Dispatch; \
    } while(0)
#define FLAG_SCHEDDISPATCH_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->ScheduleFlags |= TLSSF_Dispatch; \
    } while(0)
#else /* !AROS_NO_ATOMIC_OPERATIONS */
/*
 * Atomic variants: identical TLS lookup and NULL guard, but the field
 * updates go through the __AROS_ATOMIC_* primitives.
 */
#define IDNESTCOUNT_INC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_INC_B(__schd->IDNestCnt); \
    } while(0)
#define IDNESTCOUNT_DEC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_DEC_B(__schd->IDNestCnt); \
    } while(0)
#define TDNESTCOUNT_INC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_INC_B(__schd->TDNestCnt); \
    } while(0)
#define TDNESTCOUNT_DEC \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_DEC_B(__schd->TDNestCnt); \
    } while(0)
#define FLAG_SCHEDQUANTUM_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Quantum); \
    } while(0)
#define FLAG_SCHEDQUANTUM_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Quantum); \
    } while(0)
#define FLAG_SCHEDSWITCH_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Switch); \
    } while(0)
#define FLAG_SCHEDSWITCH_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Switch); \
    } while(0)
#define FLAG_SCHEDDISPATCH_CLEAR \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Dispatch); \
    } while(0)
#define FLAG_SCHEDDISPATCH_SET \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Dispatch); \
    } while(0)
#endif /* !AROS_NO_ATOMIC_OPERATIONS */
/*
 * Per-CPU scheduler accessors (SMP builds): each getter is a GCC
 * statement expression that fetches this CPU's scheduling data from TLS
 * and yields the field value, or a safe default (0) when the TLS slot is
 * NULL; setters are no-ops in that case.
 * (The ')}' terminators of the statement expressions were lost in
 * extraction and have been restored.)
 */
#define SCHEDQUANTUM_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->Quantum = val; \
    } while(0)
#define SCHEDQUANTUM_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        UWORD __ret = 0; \
        if (__schd) \
            __ret = (__schd->Quantum); \
        __ret; \
    })
#define SCHEDELAPSED_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->Elapsed = val; \
    } while(0)
#define SCHEDELAPSED_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        UWORD __ret = 0; \
        if (__schd) \
            __ret = (__schd->Elapsed); \
        __ret; \
    })
#define IDNESTCOUNT_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        LONG __ret = 0; \
        if (__schd) \
            __ret = (__schd->IDNestCnt); \
        __ret; \
    })
#define IDNESTCOUNT_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->IDNestCnt = val; \
    } while(0)
#define TDNESTCOUNT_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        LONG __ret = 0; \
        if (__schd) \
            __ret = (__schd->TDNestCnt); \
        __ret; \
    })
#define TDNESTCOUNT_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
            __schd->TDNestCnt = val; \
    } while(0)
/*
 * Flag query macros: yield TRUE when the given scheduling flag is set in
 * this CPU's TLS scheduling data, FALSE when clear or when the TLS slot
 * is NULL.
 *
 * BUGFIX: FLAG_SCHEDDISPATCH_ISSET declared `tls_t *__tls = TLS_PTR_GET();`
 * but then read the undeclared `__schd` (an incomplete revert); it now
 * follows the same TLS_GET(ScheduleData) pattern as its siblings.
 */
#define FLAG_SCHEDQUANTUM_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        if (__schd) \
            __ret = (__schd->ScheduleFlags & TLSSF_Quantum); \
        __ret; \
    })
#define FLAG_SCHEDSWITCH_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        if (__schd) \
            __ret = (__schd->ScheduleFlags & TLSSF_Switch); \
        __ret; \
    })
#define FLAG_SCHEDDISPATCH_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        if (__schd) \
            __ret = (__schd->ScheduleFlags & TLSSF_Dispatch); \
        __ret; \
    })
/*
 * Current-task accessors (SMP builds).
 * GET_THIS_TASK yields the task recorded in this CPU's TLS scheduling
 * data, or NULL when the TLS slot is unset.
 * SET_THIS_TASK records (x) as this CPU's running task and inserts it
 * into the global TaskRunning list under its write spinlock.
 * (The closing braces/terminators were lost in extraction and have been
 * restored.)
 */
#define GET_THIS_TASK \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        struct Task *__ret = NULL; \
        if (__schd) \
            __ret = __schd->RunningTask; \
        __ret; \
    })
#define SET_THIS_TASK(x) \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        if (__schd) \
        { \
            EXEC_SPINLOCK_LOCK(&PrivExecBase(SysBase)->TaskRunningSpinLock, NULL, SPINLOCK_MODE_WRITE); \
            __schd->RunningTask = (x); \
            AddHead(&PrivExecBase(SysBase)->TaskRunning, (struct Node *)(x)); \
            EXEC_SPINLOCK_UNLOCK(&PrivExecBase(SysBase)->TaskRunningSpinLock); \
        } \
    })
292 #else /* !__AROSEXEC_SMP__ */
/*
 * Platform-specific Exec data for non-SMP builds: intentionally empty.
 * (Braces restored — the struct body delimiters were lost in extraction.)
 */
struct Exec_PlatformData
{
    /* No platform-specific data on plain x86 builds */
};
/*
 * Non-SMP builds: all scheduler state lives directly in ExecBase, so the
 * accessors below operate on SysBase fields, atomically when available.
 */
#ifdef AROS_NO_ATOMIC_OPERATIONS
#define IDNESTCOUNT_INC             SysBase->IDNestCnt++
#define IDNESTCOUNT_DEC             SysBase->IDNestCnt--
#define TDNESTCOUNT_INC             SysBase->TDNestCnt++
#define TDNESTCOUNT_DEC             SysBase->TDNestCnt--
#define FLAG_SCHEDQUANTUM_CLEAR     SysBase->SysFlags &= ~SFF_QuantumOver
#define FLAG_SCHEDQUANTUM_SET       SysBase->SysFlags |= SFF_QuantumOver
#define FLAG_SCHEDSWITCH_CLEAR      SysBase->AttnResched &= ~ARF_AttnSwitch
#define FLAG_SCHEDSWITCH_SET        SysBase->AttnResched |= ARF_AttnSwitch
#define FLAG_SCHEDDISPATCH_CLEAR    SysBase->AttnResched &= ~ARF_AttnDispatch
#define FLAG_SCHEDDISPATCH_SET      SysBase->AttnResched |= ARF_AttnDispatch
#else
#define IDNESTCOUNT_INC             AROS_ATOMIC_INC(SysBase->IDNestCnt)
#define IDNESTCOUNT_DEC             AROS_ATOMIC_DEC(SysBase->IDNestCnt)
#define TDNESTCOUNT_INC             AROS_ATOMIC_INC(SysBase->TDNestCnt)
#define TDNESTCOUNT_DEC             AROS_ATOMIC_DEC(SysBase->TDNestCnt)
#define FLAG_SCHEDQUANTUM_CLEAR     AROS_ATOMIC_AND(SysBase->SysFlags, ~SFF_QuantumOver)
#define FLAG_SCHEDQUANTUM_SET       AROS_ATOMIC_OR(SysBase->SysFlags, SFF_QuantumOver)
#define FLAG_SCHEDSWITCH_CLEAR      AROS_ATOMIC_AND(SysBase->AttnResched, ~ARF_AttnSwitch)
#define FLAG_SCHEDSWITCH_SET        AROS_ATOMIC_OR(SysBase->AttnResched, ARF_AttnSwitch)
#define FLAG_SCHEDDISPATCH_CLEAR    AROS_ATOMIC_AND(SysBase->AttnResched, ~ARF_AttnDispatch)
#define FLAG_SCHEDDISPATCH_SET      AROS_ATOMIC_OR(SysBase->AttnResched, ARF_AttnDispatch)
#endif
/* Simple SysBase field accessors — atomicity not required for these. */
#define SCHEDQUANTUM_SET(val)       (SysBase->Quantum=(val))
#define SCHEDQUANTUM_GET            (SysBase->Quantum)
#define SCHEDELAPSED_SET(val)       (SysBase->Elapsed=(val))
#define SCHEDELAPSED_GET            (SysBase->Elapsed)
#define IDNESTCOUNT_GET             (SysBase->IDNestCnt)
#define IDNESTCOUNT_SET(val)        (SysBase->IDNestCnt=(val))
#define TDNESTCOUNT_GET             (SysBase->TDNestCnt)
#define TDNESTCOUNT_SET(val)        (SysBase->TDNestCnt=(val))
#define FLAG_SCHEDQUANTUM_ISSET     (SysBase->SysFlags & SFF_QuantumOver)
#define FLAG_SCHEDSWITCH_ISSET      (SysBase->AttnResched & ARF_AttnSwitch)
#define FLAG_SCHEDDISPATCH_ISSET    (SysBase->AttnResched & ARF_AttnDispatch)

#define GET_THIS_TASK               (SysBase->ThisTask)
#define SET_THIS_TASK(x)            (SysBase->ThisTask=(x))
337 #endif /* !__AROSEXEC_SMP__ */
339 struct Task *Exec_X86CreateIdleTask(APTR);
341 #include "kernel_intr.h"
343 #include "x86_syscalls.h"
345 #endif /* __EXEC_PLATFORM_H */