/*
    Copyright © 2017, The AROS Development Team. All rights reserved.
*/
5 #ifndef __EXEC_PLATFORM_H
6 #define __EXEC_PLATFORM_H
8 // needed to determine if this is an smp build
9 #include <aros/config.h>
10 #include <aros/atomic.h>
12 #ifndef __KERNEL_NOLIBBASE__
13 #define __KERNEL_NOLIBBASE__
14 #endif /* !__KERNEL_NOLIBBASE__ */
15 #include <proto/kernel.h>
18 #define EXEC_REMTASK_NEEDSSWITCH
/*
 * Default scheduler quantum/granularity. SMP builds use a longer quantum.
 * (The #else/#endif were lost in the damaged source and are restored here.)
 */
#if defined (__AROSEXEC_SMP__)
#define SCHEDQUANTUM_VALUE      10
#define SCHEDGRAN_VALUE         1
#else
#define SCHEDQUANTUM_VALUE      4
#define SCHEDGRAN_VALUE         1
#endif /* !__AROSEXEC_SMP__ */
28 #include "kernel_base.h"
30 #if defined(__AROSEXEC_SMP__)
31 #include "kernel_intern.h"
32 #include <aros/types/spinlock_s.h>
33 #include <utility/hooks.h>
38 /* special flag to get the scheduling code to unspin tasks */
39 #define TS_UNSPIN 0x10
41 extern struct Hook Exec_TaskSpinLockFailHook
;
42 extern struct Hook Exec_TaskSpinLockForbidHook
;
43 extern struct Hook Exec_TaskSpinLockDisableHook
;
44 extern void Exec_TaskSpinUnlock(spinlock_t
*);
/*
 * Hook pair handed to the spinlock obtain path: lock_obtainhook is invoked to
 * take the lock, lock_failhook when acquisition fails.
 * NOTE(review): the struct tag and braces were lost in the damaged source --
 * confirm the original tag name against the spinlock callers before use.
 */
struct ExecSpinSCData
{
    struct Hook *lock_obtainhook;
    struct Hook *lock_failhook;
};
54 struct Exec_PlatformData
56 spinlock_t
*(*SpinLockCall
)(spinlock_t
*, struct Hook
*, struct Hook
*, ULONG
);
59 #ifndef __KERNEL_NO_SPINLOCK_PROTOS__
60 extern void Kernel_49_KrnSpinInit(spinlock_t
*, void *);
61 extern spinlock_t
*Kernel_52_KrnSpinLock(spinlock_t
*, struct Hook
*, ULONG
, void *);
62 extern void Kernel_53_KrnSpinUnLock(spinlock_t
*, void *);
/* Convenience wrappers around the kernel spinlock calls (NULL KernelBase). */
#define EXEC_SPINLOCK_INIT(a)           Kernel_49_KrnSpinInit((a), NULL)
#define EXEC_SPINLOCK_LOCK(a,b,c)       Kernel_52_KrnSpinLock((a), (b), (c), NULL)
#define EXEC_SPINLOCK_UNLOCK(a)         Kernel_53_KrnSpinUnLock((a), NULL)
69 #if defined(AROS_NO_ATOMIC_OPERATIONS)
70 #define IDNESTCOUNT_INC \
72 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
74 __schd->IDNestCnt++; \
76 #define IDNESTCOUNT_DEC \
78 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
80 __schd->IDNestCnt--; \
82 #define TDNESTCOUNT_INC \
84 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
86 __schd->TDNestCnt++; \
88 #define TDNESTCOUNT_DEC \
90 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
92 __schd->TDNestCnt--; \
94 #define FLAG_SCHEDQUANTUM_CLEAR \
96 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
98 __schd->ScheduleFlags &= ~TLSSF_Quantum; \
100 #define FLAG_SCHEDQUANTUM_SET \
102 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
104 __schd->ScheduleFlags |= TLSSF_Quantum; \
106 #define FLAG_SCHEDSWITCH_CLEAR \
108 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
110 __schd->ScheduleFlags &= ~TLSSF_Switch; \
112 #define FLAG_SCHEDSWITCH_SET \
114 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
116 __schd->ScheduleFlags |= TLSSF_Switch; \
118 #define FLAG_SCHEDDISPATCH_CLEAR \
120 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
122 __schd->ScheduleFlags &= ~TLSSF_Dispatch; \
124 #define FLAG_SCHEDDISPATCH_SET \
126 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
128 __schd->ScheduleFlags |= TLSSF_Dispatch; \
130 #else /* !AROS_NO_ATOMIC_OPERATIONS */
131 #define IDNESTCOUNT_INC \
133 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
135 __AROS_ATOMIC_INC_B(__schd->IDNestCnt); \
137 #define IDNESTCOUNT_DEC \
139 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
141 __AROS_ATOMIC_DEC_B(__schd->IDNestCnt); \
143 #define TDNESTCOUNT_INC \
145 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
147 __AROS_ATOMIC_INC_B(__schd->TDNestCnt); \
149 #define TDNESTCOUNT_DEC \
151 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
153 __AROS_ATOMIC_DEC_B(__schd->TDNestCnt); \
155 #define FLAG_SCHEDQUANTUM_CLEAR \
157 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
159 __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Quantum); \
161 #define FLAG_SCHEDQUANTUM_SET \
163 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
165 __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Quantum); \
167 #define FLAG_SCHEDSWITCH_CLEAR \
169 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
171 __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Switch); \
173 #define FLAG_SCHEDSWITCH_SET \
175 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
177 __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Switch); \
179 #define FLAG_SCHEDDISPATCH_CLEAR \
181 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
183 __AROS_ATOMIC_AND_L(__schd->ScheduleFlags, ~TLSSF_Dispatch); \
185 #define FLAG_SCHEDDISPATCH_SET \
187 struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
189 __AROS_ATOMIC_OR_L(__schd->ScheduleFlags, TLSSF_Dispatch); \
191 #endif /* !AROS_NO_ATOMIC_OPERATIONS */
/*
 * Plain set/get accessors on the per-CPU scheduler state. Getters are GCC
 * statement expressions yielding __ret.
 * NOTE(review): the __ret declaration lines were lost in the damaged source;
 * UWORD (Quantum/Elapsed) and BYTE (nest counts) match the classic ExecBase
 * field widths -- confirm against struct X86SchedulerPrivate.
 */
#define SCHEDQUANTUM_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        __schd->Quantum = val; \
    } while (0)
#define SCHEDQUANTUM_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        UWORD __ret = 0; \
        __ret = (__schd->Quantum); \
        __ret; \
    })
#define SCHEDELAPSED_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        __schd->Elapsed = val; \
    } while (0)
#define SCHEDELAPSED_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        UWORD __ret = 0; \
        __ret = (__schd->Elapsed); \
        __ret; \
    })
#define IDNESTCOUNT_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BYTE __ret = 0; \
        __ret = (__schd->IDNestCnt); \
        __ret; \
    })
#define IDNESTCOUNT_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        __schd->IDNestCnt = val; \
    } while (0)
#define TDNESTCOUNT_GET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BYTE __ret = 0; \
        __ret = (__schd->TDNestCnt); \
        __ret; \
    })
#define TDNESTCOUNT_SET(val) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        __schd->TDNestCnt = val; \
    } while (0)
/* Flag tests: nonzero (TRUE) when the corresponding TLSSF_#? bit is set. */
#define FLAG_SCHEDQUANTUM_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        __ret = (__schd->ScheduleFlags & TLSSF_Quantum); \
        __ret; \
    })
#define FLAG_SCHEDSWITCH_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        __ret = (__schd->ScheduleFlags & TLSSF_Switch); \
        __ret; \
    })
/* Fixed: the damaged source declared tls_t *__tls here but then read __schd;
   made consistent with the sibling macros above. */
#define FLAG_SCHEDDISPATCH_ISSET \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        BOOL __ret = FALSE; \
        __ret = (__schd->ScheduleFlags & TLSSF_Dispatch); \
        __ret; \
    })
/* Running-task accessors. On SMP the current task is tracked per CPU. */
#define GET_THIS_TASK \
    ({ \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        struct Task *__ret = NULL; \
        __ret = __schd->RunningTask; \
        __ret; \
    })
/* NB: evaluates (x) twice; callers must pass a side-effect-free expression. */
#define SET_THIS_TASK(x) \
    do { \
        struct X86SchedulerPrivate *__schd = TLS_GET(ScheduleData); \
        EXEC_SPINLOCK_LOCK(&PrivExecBase(SysBase)->TaskRunningSpinLock, NULL, SPINLOCK_MODE_WRITE); \
        __schd->RunningTask = (x); \
        AddHead(&PrivExecBase(SysBase)->TaskRunning, (struct Node *)(x)); \
        EXEC_SPINLOCK_UNLOCK(&PrivExecBase(SysBase)->TaskRunningSpinLock); \
    } while (0)
292 #else /* !__AROSEXEC_SMP__ */
294 struct Exec_PlatformData
296 /* No platform-specific data on plain x86 builds */
/*
 * Non-SMP builds operate directly on the global SysBase fields.
 * (The #else/#endif of this conditional were lost in the damaged source
 * and are restored here.)
 */
#ifdef AROS_NO_ATOMIC_OPERATIONS
#define IDNESTCOUNT_INC         SysBase->IDNestCnt++
#define IDNESTCOUNT_DEC         SysBase->IDNestCnt--
#define TDNESTCOUNT_INC         SysBase->TDNestCnt++
#define TDNESTCOUNT_DEC         SysBase->TDNestCnt--
#define FLAG_SCHEDQUANTUM_CLEAR SysBase->SysFlags &= ~SFF_QuantumOver
#define FLAG_SCHEDQUANTUM_SET   SysBase->SysFlags |= SFF_QuantumOver
#define FLAG_SCHEDSWITCH_CLEAR  SysBase->AttnResched &= ~ARF_AttnSwitch
#define FLAG_SCHEDSWITCH_SET    SysBase->AttnResched |= ARF_AttnSwitch
#define FLAG_SCHEDDISPATCH_CLEAR SysBase->AttnResched &= ~ARF_AttnDispatch
#define FLAG_SCHEDDISPATCH_SET  SysBase->AttnResched |= ARF_AttnDispatch
#else
#define IDNESTCOUNT_INC         AROS_ATOMIC_INC(SysBase->IDNestCnt)
#define IDNESTCOUNT_DEC         AROS_ATOMIC_DEC(SysBase->IDNestCnt)
#define TDNESTCOUNT_INC         AROS_ATOMIC_INC(SysBase->TDNestCnt)
#define TDNESTCOUNT_DEC         AROS_ATOMIC_DEC(SysBase->TDNestCnt)
#define FLAG_SCHEDQUANTUM_CLEAR AROS_ATOMIC_AND(SysBase->SysFlags, ~SFF_QuantumOver)
#define FLAG_SCHEDQUANTUM_SET   AROS_ATOMIC_OR(SysBase->SysFlags, SFF_QuantumOver)
#define FLAG_SCHEDSWITCH_CLEAR  AROS_ATOMIC_AND(SysBase->AttnResched, ~ARF_AttnSwitch)
#define FLAG_SCHEDSWITCH_SET    AROS_ATOMIC_OR(SysBase->AttnResched, ARF_AttnSwitch)
#define FLAG_SCHEDDISPATCH_CLEAR AROS_ATOMIC_AND(SysBase->AttnResched, ~ARF_AttnDispatch)
#define FLAG_SCHEDDISPATCH_SET  AROS_ATOMIC_OR(SysBase->AttnResched, ARF_AttnDispatch)
#endif /* !AROS_NO_ATOMIC_OPERATIONS */
322 #define SCHEDQUANTUM_SET(val) (SysBase->Quantum=(val))
323 #define SCHEDQUANTUM_GET (SysBase->Quantum)
324 #define SCHEDELAPSED_SET(val) (SysBase->Elapsed=(val))
325 #define SCHEDELAPSED_GET (SysBase->Elapsed)
326 #define IDNESTCOUNT_GET (SysBase->IDNestCnt)
327 #define IDNESTCOUNT_SET(val) (SysBase->IDNestCnt=(val))
328 #define TDNESTCOUNT_GET (SysBase->TDNestCnt)
329 #define TDNESTCOUNT_SET(val) (SysBase->TDNestCnt=(val))
330 #define FLAG_SCHEDQUANTUM_ISSET (SysBase->SysFlags & SFF_QuantumOver)
331 #define FLAG_SCHEDSWITCH_ISSET (SysBase->AttnResched & ARF_AttnSwitch)
332 #define FLAG_SCHEDDISPATCH_ISSET (SysBase->AttnResched & ARF_AttnDispatch)
334 #define GET_THIS_TASK (SysBase->ThisTask)
335 #define SET_THIS_TASK(x) (SysBase->ThisTask=(x))
337 #endif /* !__AROSEXEC_SMP__ */
339 struct Task
*Exec_X86CreateIdleTask(APTR
);
341 #include "kernel_intr.h"
343 #include "x86_syscalls.h"
345 #endif /* __EXEC_PLATFORM_H */