/*
 *  include/asm-s390/smp.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)

#include <asm/lowcore.h>
#include <asm/sigp.h>
#include <asm/ptrace.h>

/*
 * s390 specific smp.c headers
 */
typedef struct
{
	int        intresting;
	sigp_ccode ccode;
	__u32      status;
	__u16      cpu;
} sigp_info;

extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

#define NO_PROC_ID		0xFF		/* No processor magic marker */

/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs. Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
 * gut feeling is this will vary by board in value. For a board
 * with separate L2 cache it probably depends also on the RSS, and
 * for a board with shared L2 cache it ought to decay fast as other
 * processes are run.
 */
#define PROC_CHANGE_PENALTY	20		/* Schedule penalty */

#define raw_smp_processor_id()	(S390_lowcore.cpu_data.cpu_nr)
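
/* Return the CPU address of the executing CPU; "stap" stores it into memory. */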
static inline __u16 hard_smp_processor_id(void)
{
	__u16 cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

/*
 * returns 1 if cpu is in stopped/check stopped state or not operational
 * returns 0 otherwise
 */
static inline int
smp_cpu_not_running(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_order_code_accepted:
	case sigp_status_stored:
		/* Check for stopped and check stop state */
		if (status & 0x50)
			return 1;
		break;
	case sigp_not_operational:
		return 1;
	default:
		break;
	}
	return 0;
}
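
/* cpu_logical_map() is the identity mapping on s390. */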
#define cpu_logical_map(cpu)	(cpu)

extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
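
/* Cross-CPU call: run func(info) on the CPUs in mask; wait selects synchronous completion. */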
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
				  void *info, int wait);
#endif

#ifndef CONFIG_SMP
static inline void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
}
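
/* Uniprocessor fallbacks for the SMP helpers above. */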
#define hard_smp_processor_id()		0
#define smp_cpu_not_running(cpu)	1
#endif

extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
#endif