arch/.unmaintained/m68k-native/machine.h  (tangerine.git)
#ifndef AROS_MACHINE_H
#define AROS_MACHINE_H

/*
    Copyright © 1995-2001, The AROS Development Team. All rights reserved.
    $Id$

    NOTE: This file must compile *without* any other header!

    Desc: machine.h include file for Amiga/m68k
    Lang: english
*/
/* Information generated by machine.c */
#define AROS_STACK_GROWS_DOWNWARDS 1 /* Stack direction       */
#define AROS_BIG_ENDIAN            1 /* Big or little endian  */
#define AROS_SIZEOFULONG           4 /* Size of an ULONG      */
#define AROS_WORDALIGN             2 /* Alignment for WORD    */
#define AROS_LONGALIGN             2 /* Alignment for LONG    */
#define AROS_PTRALIGN              2 /* Alignment for PTR     */
#define AROS_IPTRALIGN             2 /* Alignment for IPTR    */
#define AROS_DOUBLEALIGN           2 /* Alignment for double  */
#define AROS_WORSTALIGN            8 /* Worst case alignment  */

#define AROS_GET_SYSBASE    extern struct ExecBase * SysBase;
#define AROS_GET_DOSBASE    extern struct DosLibrary * DOSBase;
#define AROS_GET_SYSBASE_OK extern struct ExecBase * SysBase;
/* Do we need a function attribute to get parameters on the stack? */
#define __stackparm

register unsigned char * AROS_GET_SP asm("%sp");

/*
    How much do I have to add to sp to get the address of the first
    byte on the stack?
*/
#define SP_OFFSET 0
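
/*
    Usage sketch (illustrative): with SP_OFFSET 0 the first byte on the
    (downwards growing) stack is simply the byte the stack pointer points
    at:

        unsigned char *first_stack_byte = AROS_GET_SP + SP_OFFSET;
*/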

/*
    BCPL datatypes
*/
#define AROS_BPTR_TYPE unsigned long
#define AROS_BSTR_TYPE unsigned long
#define MKBADDR(a)     (((unsigned long)(a))>>2)
#define BADDR(a)       (((unsigned long)(a))<<2)
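
/*
    Usage sketch (illustrative; 'lock' stands for a hypothetical BPTR
    returned by a DOS call, 'fl' for the corresponding C pointer):

        struct FileLock *fl   = (struct FileLock *)BADDR(lock);
        AROS_BPTR_TYPE   back = MKBADDR(fl);

    BADDR() turns a BCPL pointer (a longword index) into a byte address by
    shifting it left by two; MKBADDR() does the reverse. Both assume the
    object is longword aligned.
*/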

/* Macros to test/set failure of AllocEntry() */
#define AROS_ALLOCENTRY_FAILED(memType) \
    ((struct MemList *)((IPTR)(memType) | 0x80ul<<(sizeof(APTR)-1)*8))
#define AROS_CHECK_ALLOCENTRY(memList) \
    (!((IPTR)(memList) & 0x80ul<<(sizeof(APTR)-1)*8))
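
/*
    Usage sketch (illustrative; 'entry' stands for a struct MemList
    template handed to exec.library/AllocEntry()):

        struct MemList *ml = AllocEntry(entry);

        if (!AROS_CHECK_ALLOCENTRY(ml))
            return NULL;

    On failure the value returned by AllocEntry() has the high bit set and
    carries the failing memory type, which is exactly the shape that
    AROS_ALLOCENTRY_FAILED() constructs.
*/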

/*
    One entry in a library's jumptable. For assembler compatibility, the
    field jmp should contain the code for an absolute jmp to a 32bit
    address. There are also a couple of macros which you should use to
    access the vector table from C.
*/
struct JumpVec
{
    unsigned short jmp;
    unsigned char  vec[4];
};

/* Any jump to an unimplemented vector will cause an access to this address */
#define _aros_empty_vector 0xc0edbabe

/* Internal macros */
#define __AROS_ASMJMP       0x4EF9
#define __AROS_SET_VEC(v,a) (*(ULONG*)(v)->vec=(ULONG)(a))
#define __AROS_GET_VEC(v)   ((APTR)(*(ULONG*)(v)->vec))

/* Use these to access a vector table */
#define LIB_VECTSIZE                  (sizeof (struct JumpVec))
#define __AROS_GETJUMPVEC(lib,n)      ((struct JumpVec *)(((UBYTE *)lib)-(n*LIB_VECTSIZE)))
#define __AROS_GETVECADDR(lib,n)      (__AROS_GET_VEC(__AROS_GETJUMPVEC(lib,n)))
#define __AROS_SETVECADDR(lib,n,addr) (__AROS_SET_VEC(__AROS_GETJUMPVEC(lib,n),(APTR)(addr)))
#define __AROS_INITVEC(lib,n)         __AROS_GETJUMPVEC(lib,n)->jmp = __AROS_ASMJMP, \
                                      __AROS_SETVECADDR(lib,n,_aros_empty_vector)
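
/*
    Usage sketch (illustrative; 'lib' stands for any struct Library base
    and 'MyFunc' for a hypothetical function meant to serve LVO slot 5):

        __AROS_INITVEC(lib, 5);
        __AROS_SETVECADDR(lib, 5, MyFunc);
        APTR current = __AROS_GETVECADDR(lib, 5);

    __AROS_INITVEC() writes the jmp opcode and points the slot at
    _aros_empty_vector; the vectors grow downwards from the library base
    at LIB_VECTSIZE (6 bytes here) per entry, as in the classic Amiga
    jumptable layout.
*/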

/*
    The following AROS_LVO_CALLs are not protected by #ifdef/#endif pairs.
    This is because these are really the ones to be defined, not the
    (protected) ones from <aros/libcall.h>. Better to have a compiler
    warning than to define the wrong ones.

    The reason to define them this way (jsr offset(a6)) is to jump through
    the library vector, instead of fetching the address at offset+2 and
    jumping to that address. Some libraries may put something else
    (e.g. an rts) in the vector instead of a 'jmp <address>'. Fetching the
    address from such a vector would crash the AROS_LVO_CALLs in
    <aros/libcall.h>.
*/
#define LVO_ARGT(t,v,r) t
#define LVO_ARGV(t,v,r) v
#define LVO_ARGR(t,v,r) r

#define AROS_LVO_CALL0(rt,bt,bn,offs,name) \
({ \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL0NR(bt,bn,offs,name) \
({ \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

#define AROS_LVO_CALL1(rt,a1,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn), "r"(_n1) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL1NR(a1,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn), "r"(_n1) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

#define AROS_LVO_CALL2(rt,a1,a2,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL2NR(a1,a2,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

#define AROS_LVO_CALL3(rt,a1,a2,a3,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL3NR(a1,a2,a3,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

#define AROS_LVO_CALL4(rt,a1,a2,a3,a4,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    LVO_ARGT(a4) _##name##_v4 = (LVO_ARGV(a4)); \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    register LVO_ARGT(a4) _n4 __asm(LVO_ARGR(a4)) = _##name##_v4; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3), "r"(_n4) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL4NR(a1,a2,a3,a4,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    LVO_ARGT(a4) _##name##_v4 = (LVO_ARGV(a4)); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    register LVO_ARGT(a4) _n4 __asm(LVO_ARGR(a4)) = _##name##_v4; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3), "r"(_n4) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

#define AROS_LVO_CALL5(rt,a1,a2,a3,a4,a5,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    LVO_ARGT(a4) _##name##_v4 = (LVO_ARGV(a4)); \
    LVO_ARGT(a5) _##name##_v5 = (LVO_ARGV(a5)); \
    register rt _##name##_re __asm("d0"); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    register LVO_ARGT(a4) _n4 __asm(LVO_ARGR(a4)) = _##name##_v4; \
    register LVO_ARGT(a5) _n5 __asm(LVO_ARGR(a5)) = _##name##_v5; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : "=r" (_##name##_re) \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3), "r"(_n4), "r"(_n5) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
    _##name##_re; \
})

#define AROS_LVO_CALL5NR(a1,a2,a3,a4,a5,bt,bn,offs,name) \
({ \
    LVO_ARGT(a1) _##name##_v1 = (LVO_ARGV(a1)); \
    LVO_ARGT(a2) _##name##_v2 = (LVO_ARGV(a2)); \
    LVO_ARGT(a3) _##name##_v3 = (LVO_ARGV(a3)); \
    LVO_ARGT(a4) _##name##_v4 = (LVO_ARGV(a4)); \
    LVO_ARGT(a5) _##name##_v5 = (LVO_ARGV(a5)); \
    register struct Library *const _##name##_bn __asm("a6") = (struct Library*)(bn);\
    register LVO_ARGT(a1) _n1 __asm(LVO_ARGR(a1)) = _##name##_v1; \
    register LVO_ARGT(a2) _n2 __asm(LVO_ARGR(a2)) = _##name##_v2; \
    register LVO_ARGT(a3) _n3 __asm(LVO_ARGR(a3)) = _##name##_v3; \
    register LVO_ARGT(a4) _n4 __asm(LVO_ARGR(a4)) = _##name##_v4; \
    register LVO_ARGT(a5) _n5 __asm(LVO_ARGR(a5)) = _##name##_v5; \
    __asm volatile ("jsr a6@(-"#offs"*6:W)" \
        : /* no output */ \
        : "r" (_##name##_bn), "r"(_n1), "r"(_n2), "r"(_n3), "r"(_n4), "r"(_n5) \
        : "d0", "d1", "a0", "a1", "cc", "memory"); \
})

/*
    Round x up to the next worst case alignment boundary, e.g. when the
    next x bytes must be skipped in front of a structure.
*/
#define AROS_ALIGN(x) (((x)+AROS_WORSTALIGN-1)&-AROS_WORSTALIGN)
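
/*
    Example: with AROS_WORSTALIGN 8, AROS_ALIGN(13) evaluates to
    (13+7) & -8 = 20 & ~7 = 16, i.e. the smallest multiple of the worst
    case alignment that is >= 13.
*/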

/* Prototypes */
extern void _aros_not_implemented (char *);

/* How much stack do we need? Lots :-) */
#define AROS_STACKSIZE 100000

/* The registers */
#define D0 "d0"
#define D1 "d1"
#define D2 "d2"
#define D3 "d3"
#define D4 "d4"
#define D5 "d5"
#define D6 "d6"
#define D7 "d7"
#define A0 "a0"
#define A1 "a1"
#define A2 "a2"
#define A3 "a3"
#define A4 "a4"
#define A5 "a5"
#define A6 "a6"

#ifndef __SASC
#define __d0 __asm("d0")
#define __d1 __asm("d1")
#define __d2 __asm("d2")
#define __d3 __asm("d3")
#define __d4 __asm("d4")
#define __d5 __asm("d5")
#define __d6 __asm("d6")
#define __d7 __asm("d7")

#define __a0 __asm("a0")
#define __a1 __asm("a1")
#define __a2 __asm("a2")
#define __a3 __asm("a3")
#define __a4 __asm("a4")
#define __a5 __asm("a5")
#define __a6 __asm("a6")
#endif

/* RawDoFmt hook */
#define RDFCALL(hook,data,dptr) ((void(*)(UBYTE __d0,APTR __a3))(hook))(data,dptr);
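
/*
    Usage sketch (illustrative; 'PutChProc' and 'buffer' are hypothetical
    and stand for the formatting hook and data pointer of
    exec.library/RawDoFmt()):

        RDFCALL(PutChProc, 'A', buffer);

    The cast in RDFCALL() declares the hook as taking the character in d0
    and the data pointer in a3, matching the RawDoFmt() callback
    convention.
*/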

/* What to do with the library base in header, prototype and call */
#define __AROS_LH_BASE(basetype,basename)  basetype basename __asm("a6")
#define __AROS_LP_BASE(basetype,basename)  void * __asm("a6")
#define __AROS_LC_BASE(basetype,basename)  basename
#define __AROS_LD_BASE(basetype,basename)  basetype __asm("a6")

/* How to transform an argument in header, prototype and call */
#define __AROS_LHA(type,name,reg)  type name __asm(reg)
#define __AROS_LPA(type,name,reg)  type __asm(reg)
#define __AROS_LCA(type,name,reg)  name
#define __AROS_LDA(type,name,reg)  type __asm(reg)
#define __AROS_UFHA(type,name,reg) type name __asm(reg)
#define __AROS_UFPA(type,name,reg) type __asm(reg)
#define __AROS_UFCA(type,name,reg) name
#define __AROS_UFDA(type,name,reg) type __asm(reg)
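
/*
    Usage sketch (illustrative; 'MyFunc', 'value' and 'MyLibBase' are
    hypothetical, and the AROS_LH1/AROS_LC1 wrappers that normally use
    these building blocks live in <aros/libcall.h>). The macros above
    expand as follows:

        __AROS_LHA(LONG, value, "d0")               ->  LONG value __asm("d0")
        __AROS_LH_BASE(struct Library *, MyLibBase) ->  struct Library * MyLibBase __asm("a6")
        __AROS_LCA(LONG, 42, "d0")                  ->  42
        __AROS_LC_BASE(struct Library *, MyLibBase) ->  MyLibBase

    so a function header built from them becomes
    'LONG MyFunc(LONG value __asm("d0"), struct Library * MyLibBase __asm("a6"))'
    and the matching call site reduces to 'MyFunc(42, MyLibBase)'.
*/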

/* Prefix for library function in header, prototype and call */
#define __AROS_LH_PREFIX  /* eps */
#define __AROS_LP_PREFIX  /* eps */
#define __AROS_LC_PREFIX  /* eps */
#define __AROS_LD_PREFIX  /* eps */
#define __AROS_UFH_PREFIX /* eps */
#define __AROS_UFP_PREFIX /* eps */
#define __AROS_UFC_PREFIX /* eps */
#define __AROS_UFD_PREFIX /* eps */

/* If this is defined, all AROS_LP*-macros will expand to nothing. */
#define __AROS_USE_MACROS_FOR_LIBCALL

#endif /* AROS_MACHINE_H */