/*
 *  include/asm-s390/system.h
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#ifdef __KERNEL__

extern struct task_struct *__switch_to(void *, void *);

#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
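
/*
 * Added note: __FLAG_SHIFT moves PSW bits 6 and 7 (the I/O and external
 * interrupt mask bits) down to the low two bits of a saved flags word, as
 * used by irqs_disabled() below; the mask byte sits 56 bits up in the
 * 64-bit z/Architecture PSW mask and 24 bits up in the 31-bit ESA/390 one.
 */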
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile (
		"   std   0,8(%1)\n"
		"   std   2,24(%1)\n"
		"   std   4,40(%1)\n"
		"   std   6,56(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile (
		"   stfpc 0(%1)\n"
		"   std   1,16(%1)\n"
		"   std   3,32(%1)\n"
		"   std   5,48(%1)\n"
		"   std   7,64(%1)\n"
		"   std   8,72(%1)\n"
		"   std   9,80(%1)\n"
		"   std   10,88(%1)\n"
		"   std   11,96(%1)\n"
		"   std   12,104(%1)\n"
		"   std   13,112(%1)\n"
		"   std   14,120(%1)\n"
		"   std   15,128(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile (
		"   ld    0,8(%0)\n"
		"   ld    2,24(%0)\n"
		"   ld    4,40(%0)\n"
		"   ld    6,56(%0)"
		: : "a" (fpregs), "m" (*fpregs) );
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile (
		"   lfpc  0(%0)\n"
		"   ld    1,16(%0)\n"
		"   ld    3,32(%0)\n"
		"   ld    5,48(%0)\n"
		"   ld    7,64(%0)\n"
		"   ld    8,72(%0)\n"
		"   ld    9,80(%0)\n"
		"   ld    10,88(%0)\n"
		"   ld    11,96(%0)\n"
		"   ld    12,104(%0)\n"
		"   ld    13,112(%0)\n"
		"   ld    14,120(%0)\n"
		"   ld    15,128(%0)"
		: : "a" (fpregs), "m" (*fpregs) );
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}
#define switch_to(prev,next,last) do {			\
	if (prev == next)				\
		break;					\
	save_fp_regs(&prev->thread.fp_regs);		\
	restore_fp_regs(&next->thread.fp_regs);		\
	save_access_regs(&prev->thread.acrs[0]);	\
	restore_access_regs(&next->thread.acrs[0]);	\
	prev = __switch_to(prev,next);			\
} while (0)
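
/*
 * Illustrative note (not part of the original header): the scheduler core
 * invokes this macro from its context-switch path roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that after the low-level switch "prev" again names the task we
 * switched away from.
 */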
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_system_vtime(prev) do { } while (0)
#endif
#define finish_arch_switch(rq, prev) do {	\
	set_fs(current->thread.mm_segment);	\
	account_system_vtime(prev);		\
	spin_unlock_irq(&(rq)->lock);		\
} while (0)

#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		/* Byte exchange, done as compare-and-swap on the
		 * containing aligned word, replacing only one byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 2:
		/* Halfword exchange, same trick on the aligned word. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 4:
		asm volatile (
			"    l   %0,0(%3)\n"
			"0:  cs  %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) ptr)
			: "d" (x), "a" (ptr), "m" (*(int *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    lg  %0,0(%3)\n"
			"0:  csg %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "a" (ptr), "m" (*(long *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#endif /* __s390x__ */
	}
	return x;
}
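
/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically replaces a value and hands back the old one, e.g. to consume
 * a set of pending-event bits in one step.  The helper and its argument
 * are hypothetical.
 */
static inline unsigned int __example_take_pending(unsigned int *pending)
{
	/* Atomically fetch the old bits and reset the word to zero. */
	return xchg(pending, 0);
}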
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* Byte compare-and-swap via the containing aligned word;
		 * retry only if the byte of interest changed. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (ptr),
			  "d" (~(255 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (ptr),
			  "d" (~(65535 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 4:
		asm volatile (
			"    cs  %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    csg %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#endif /* __s390x__ */
	}
	return old;
}
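
/*
 * Usage sketch (illustrative, not part of the original header): a lock-free
 * add built on cmpxchg(); retry if another CPU modified the word between
 * our read and the compare-and-swap.  The helper is hypothetical.
 */
static inline void __example_atomic_add(unsigned int *counter, unsigned int val)
{
	unsigned int old;

	do {
		old = *counter;
		/* succeeds only if *counter still equals old */
	} while (cmpxchg(counter, old, old + val) != old);
}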
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	__asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
# define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
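
/*
 * Usage sketch (illustrative, not part of the original header): the producer
 * half of a flag/data handoff.  wmb() keeps the payload store ordered before
 * the flag store; the consumer pairs it with rmb().  Both variables are
 * hypothetical.
 */
static inline void __example_publish(unsigned long *data, unsigned long *ready)
{
	*data = 42;	/* write the payload ...		   */
	wmb();		/* ... and order it before the flag	   */
	*ready = 1;	/* consumer: check ready, rmb(), read data */
}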
/* interrupt control.. */
#define local_irq_enable() ({ \
	unsigned long __dummy; \
	__asm__ __volatile__ ( \
		"stosm 0(%1),0x03" \
		: "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
	})

#define local_irq_disable() ({ \
	unsigned long __flags; \
	__asm__ __volatile__ ( \
		"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
	__flags; \
	})

#define local_save_flags(x) \
	__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )

#define local_irq_restore(x) \
	__asm__ __volatile__("ssm   0(%0)" : : "a" (&x), "m" (x) : "memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!((flags >> __FLAG_SHIFT) & 3);	\
})
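
/*
 * Usage sketch (illustrative, not part of the original header): a debugging
 * check that a code path is not running with interrupts masked off.  The
 * helper is hypothetical.
 */
static inline int __example_interrupts_are_on(void)
{
	return !irqs_disabled();
}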
#ifdef __s390x__

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctlg 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    lg    0,0(%1)\n" \
		"    ogr   0,%3\n"       /* set the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1L<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    lg    0,0(%1)\n" \
		"    ngr   0,%3\n"       /* clear the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1L<<(bit))) \
		: "cc", "0", "1" ); \
	})
#else /* __s390x__ */

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctl 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctl 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)): "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    or    0,%3\n"       /* set the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    nr    0,%3\n"       /* clear the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1<<(bit))) \
		: "cc", "0", "1" ); \
	})

#endif /* __s390x__ */
/* For spinlocks etc */
#define local_irq_save(x)	((x) = local_irq_disable())
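
/*
 * Usage sketch (illustrative, not part of the original header): bracket a
 * short critical section with local_irq_save()/local_irq_restore() so it
 * cannot be interrupted on this CPU.  The counter is hypothetical.
 */
static inline void __example_irq_safe_inc(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts, remember old mask */
	(*counter)++;			/* runs without local interruption */
	local_irq_restore(flags);	/* restore previous interrupt state */
}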
/*
 * Used to set the psw mask, except for the first byte, which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	local_save_flags(mask);
	__load_psw_mask(mask);
}

#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
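
/*
 * Usage sketch (illustrative, not part of the original header): ctl_set_bit()
 * turns on one bit in a control register, on every CPU under CONFIG_SMP and
 * locally otherwise.  The register and bit number below are hypothetical
 * placeholders, not a real facility assignment.
 */
static inline void __example_enable_facility(void)
{
	ctl_set_bit(0, 17);	/* set bit 17 of control register 0 */
}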
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */