/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
notrace static unsigned int check_preemption_disabled(const char *what1,
						       const char *what2)
{
	int this_cpu = raw_smp_processor_id();
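	/*
	 * With preemption disabled or hard interrupts off the task cannot
	 * migrate, so the CPU number read above stays valid and the two
	 * fast-path checks below simply bail out.
	 */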
	if (likely(preempt_count()))
		goto out;

	if (irqs_disabled())
		goto out;
	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;
	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;
	/*
	 * Avoid recursion: the report below goes through printk() and
	 * dump_stack(), which can themselves end up using
	 * smp_processor_id(); with preemption disabled here, any such
	 * nested call takes the early preempt_count() exit above.
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;
	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
		what1, what2, preempt_count() - 1, current->comm, current->pid);

	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}
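/*
 * When CONFIG_DEBUG_PREEMPT is enabled, include/linux/smp.h maps
 * smp_processor_id() to debug_smp_processor_id(), so every caller is run
 * through the checks above.
 */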
notrace unsigned int debug_smp_processor_id(void)
{
	return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
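/*
 * Same check for the __this_cpu_*() accessors: with CONFIG_DEBUG_PREEMPT,
 * the __this_cpu_preempt_check() calls emitted from
 * include/linux/percpu-defs.h land here, with "op" naming the operation
 * being verified (e.g. "read", "write", "add").
 */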
notrace void __this_cpu_preempt_check(const char *op)
{
	check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
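
/*
 * Illustrative sketch (not part of this file): the caller-side pattern the
 * check above is meant to enforce. Calling smp_processor_id() from
 * preemptible context trips the "BUG: using smp_processor_id() in
 * preemptible" report, while get_cpu()/put_cpu() (or any other
 * preempt_disable() section) keeps the CPU number stable and the check
 * quiet:
 *
 *	int cpu;
 *
 *	cpu = smp_processor_id();	// preemptible context: may be reported
 *
 *	cpu = get_cpu();		// disables preemption
 *	do_something_per_cpu(cpu);	// hypothetical per-CPU work
 *	put_cpu();			// re-enables preemption
 */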