arch/sparc/include/asm/smp_32.h
/* SPDX-License-Identifier: GPL-2.0 */
/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <linux/atomic.h>

/*
 * Private routines/data
 */

extern unsigned char boot_cpu_id;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;
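
/* smpfunc_t is the signature required of every cross-call target: it
 * always takes five unsigned long arguments.  The xc0()..xc4() helpers
 * further down fill any unused argument slots with zero.
 */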
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
			  unsigned long, unsigned long);

void cpu_panic(void);

/*
 * General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_store_cpu_info(int);

void smp_resched_interrupt(void);
void smp_call_function_single_interrupt(void);
void smp_call_function_interrupt(void);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
struct sparc32_ipi_ops {
	void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4);
	void (*resched)(int cpu);
	void (*single)(int cpu);
	void (*mask_one)(int cpu);
};

extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
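
/* The platform SMP code (e.g. the sun4m and sun4d support declared above)
 * installs its IPI implementation in sparc32_ipi_ops during boot.  The
 * xc0()..xc4() helpers below are thin wrappers that broadcast a function
 * to all online CPUs via cross_call().
 */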
static inline void xc0(smpfunc_t func)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}

static inline void xc1(smpfunc_t func, unsigned long arg1)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}

static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}

static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
				    arg1, arg2, arg3, 0);
}

static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3, unsigned long arg4)
{
	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
				    arg1, arg2, arg3, arg4);
}
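
/*
 * Illustrative sketch (hypothetical handler, not part of this interface):
 * to run a function on every online CPU with a single argument, a caller
 * provides an smpfunc_t-compatible handler and uses xc1().  Unused
 * argument slots arrive as zero.
 *
 *	static void example_handler(unsigned long arg1, unsigned long a2,
 *				    unsigned long a3, unsigned long a4,
 *				    unsigned long a5)
 *	{
 *		... act on arg1 on this CPU ...
 *	}
 *
 *	xc1(example_handler, some_value);
 */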
void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
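
/* cpu_logical_map() is the identity mapping here; callers get back the
 * logical CPU number they pass in.
 */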
static inline int cpu_logical_map(int cpu)
{
	return cpu;
}

int hard_smp_processor_id(void);

#define raw_smp_processor_id()		(current_thread_info()->cpu)

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL         0x0005       /* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU          0xFB
#define MBOX_IDLECPU          0xFC
#define MBOX_IDLECPU2         0xFD
#define MBOX_STOPCPU2         0xFE

#else /* SMP */
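
/* Uniprocessor build: only CPU 0 exists, so these collapse to a constant
 * and a no-op.
 */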
#define hard_smp_processor_id()		0

#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */
#endif /* !(_SPARC_SMP_H) */