/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */
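/*
 * Illustrative examples of the "profile=" boot parameter parsed by
 * profile_setup() below (these examples are not part of the original
 * file): prof_shift is the log2 of the sampling step, i.e. one hit
 * counter covers 2^prof_shift bytes of kernel text.
 *
 *	profile=2		CPU-time profiling, 4-byte granularity
 *	profile=schedule,4	schedule() call profiling, 16-byte granularity
 *	profile=kvm,0		KVM profiling, single-byte granularity
 */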
#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
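	/*
	 * Fall back through progressively more forgiving allocators:
	 * kzalloc() wants physically contiguous memory and may fail for
	 * large buffers, alloc_pages_exact() can still satisfy large
	 * requests from the page allocator, and vzalloc() only needs
	 * virtually contiguous space.
	 */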
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);
int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);
int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
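/*
 * Worked example of the probe sequence (illustrative, not from the
 * original source, assuming PAGE_SIZE == 4096): sizeof(struct
 * profile_hit) == 8, so NR_PROFILE_HIT == 512 entries in 64 groups of
 * 8.  For a scaled pc == 0x2a5, do_profile_hits() below computes
 *
 *	primary   = (0x2a5 & 63) << 3 = 37 << 3 = 296
 *	secondary = (~(0x2a5 << 1) & 63) << 3 = 53 << 3 = 424
 *
 * and scans 8 entries starting at slot 296, then at (296 + 424) % 512,
 * and so on.  Because pc << 1 is even, ~(pc << 1) is odd, so
 * secondary / 8 is odd and coprime to the 64 groups: the probe visits
 * every group before wrapping back to primary, at which point the
 * table is known to be full and gets flushed to prof_buffer.
 */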
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}
static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}
static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops);
}
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
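/*
 * A minimal userspace sketch of the exported layout (illustrative
 * only, not part of the original file; readprofile(8) is the proper
 * consumer of this data):
 *
 *	int fd = open("/proc/profile", O_RDONLY);
 *	unsigned int step;
 *	read(fd, &step, sizeof(step));	// sampling step: 1 << prof_shift
 *	// ...followed by one counter of sizeof(unsigned int) bytes per
 *	// 'step' bytes of kernel text, starting at _stext.
 */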
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
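/*
 * Illustrative shell usage (not from the original source): any write
 * clears the buffer, so
 *
 *	echo >/proc/profile
 *
 * resets the counters (readprofile -r does the same), while writing a
 * binary sizeof(int) multiplier first adjusts the profiling interrupt
 * frequency on SMP architectures that implement setup_profiling_timer().
 */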
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};
#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif
int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */