2 * include/asm-s390/smp.h
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Heiko Carstens (heiko.carstens@de.ibm.com)
13 #include <linux/threads.h>
14 #include <linux/cpumask.h>
15 #include <linux/bitops.h>
17 #if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
19 #include <asm/lowcore.h>
21 #include <asm/ptrace.h>
24 s390 specific smp.c headers
/*
 * s390-specific SMP entry points implemented in arch/s390/kernel/smp.c.
 */
extern void smp_setup_cpu_possible_map(void);
extern int smp_call_function_on(void (*func) (void *info), void *info,
				int nonatomic, int wait, int cpu);

#define NO_PROC_ID		0xFF		/* No processor magic marker */
/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs. Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
 * gut feeling is this will vary by board in value. For a board
 * with separate L2 cache it probably depends also on the RSS, and
 * for a board with shared L2 cache it ought to decay fast as other
 * processes are run.
 */
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
51 #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
53 extern int smp_get_cpu(cpumask_t cpu_map
);
54 extern void smp_put_cpu(int cpu
);
56 static inline __u16
hard_smp_processor_id(void)
60 asm volatile("stap %0" : "=m" (cpu_address
));
65 * returns 1 if cpu is in stopped/check stopped state or not operational
69 smp_cpu_not_running(int cpu
)
73 switch (signal_processor_ps(&status
, 0, cpu
, sigp_sense
)) {
74 case sigp_order_code_accepted
:
75 case sigp_status_stored
:
76 /* Check for stopped and check stop state */
80 case sigp_not_operational
:
/* Logical and physical CPU numbers are identical on s390. */
#define cpu_logical_map(cpu)	(cpu)

extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void) __attribute__((noreturn));
extern int __cpu_up(unsigned int cpu);
/*
 * Uniprocessor fallback: with only one CPU, run @func right here and
 * report success.  @nonatomic, @wait and @cpu are ignored.
 */
static inline int
smp_call_function_on(void (*func) (void *info), void *info,
		     int nonatomic, int wait, int cpu)
{
	func(info);
	return 0;
}
106 static inline void smp_send_stop(void)
108 /* Disable all interrupts/machine checks */
109 __load_psw_mask(PSW_KERNEL_BITS
& ~PSW_MASK_MCHECK
);
/*
 * Single-CPU stubs (presumably the !CONFIG_SMP branch — the matching
 * #else is outside this view, confirm): the only CPU is always running,
 * there is nothing to pin or to enumerate, so everything collapses to
 * constants/no-ops.
 */
#define smp_cpu_not_running(cpu) 1
#define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; })
#define smp_setup_cpu_possible_map() do { } while (0)