/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
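/*
 * Illustrative usage (commentary, not part of the original source):
 * booting with "profile=2" enables CPU-time profiling with one
 * counter per 2^2 = 4 bytes of kernel text, while
 * "profile=schedule,4" counts schedule() calls at 16-byte
 * granularity.
 */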
int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!slab_is_available()) {
		prof_buffer = alloc_bootmem(buffer_bytes);
		alloc_bootmem_cpumask_var(&prof_cpu_mask);
		cpumask_copy(prof_cpu_mask, cpu_possible_mask);
		return 0;
	}

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
	if (prof_buffer)
		return 0;

	prof_buffer = vmalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
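/*
 * Note (commentary): the allocation above falls back from kzalloc()
 * to alloc_pages_exact() to vmalloc(), so a large profile buffer can
 * still be allocated after physically contiguous memory has become
 * fragmented; vmalloc() needs only virtually contiguous pages, at
 * the cost of extra TLB pressure.
 */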
/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
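/*
 * Worked example (commentary, assuming a 4KiB PAGE_SIZE):
 * sizeof(struct profile_hit) is 8 bytes, so each per-cpu page holds
 * NR_PROFILE_HIT = 512 entries, arranged as NR_PROFILE_GRP = 64
 * groups of PROFILE_GRPSZ = 8. A profile slot pc first probes the
 * group (pc & 63); on collision the probe advances by a pc-derived
 * stride of whole groups until it returns to the primary group, at
 * which point the table is flushed to prof_buffer.
 */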
#ifdef CONFIG_PROC_FS
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}
static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_PROC_FS */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
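/*
 * Illustrative probe trace (assumed values, commentary only): with
 * the 512-entry table of the 4KiB-page example above and pc = 0x41,
 * the primary probe starts at entry (0x41 & 63) << 3 = 8 and the
 * stride is (~(0x41 << 1) & 63) << 3 = 488 entries, so successive
 * groups begin at (8 + 488) & 511 = 496, then (496 + 488) & 511 =
 * 472, and so on until the walk returns to entry 8 and the table is
 * flushed.
 */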
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
EXPORT_SYMBOL_GPL(profile_hits);
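/*
 * Note (commentary): on uniprocessor builds there is no cross-CPU
 * cacheline contention on prof_buffer, so profile_hits() above adds
 * directly to the global buffer and the per-cpu hashtables and
 * buffer flipping are compiled away entirely.
 */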
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc(struct file *file,
			const char __user *buffer, unsigned long count,
			void *data)
{
	struct cpumask *mask = data;
	unsigned long full_count = count, err;
	cpumask_var_t new_value;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(mask, new_value);
		err = full_count;
	}
	free_cpumask_var(new_value);
	return err;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;
	entry->data = prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
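/*
 * Layout of /proc/profile as produced above (commentary): the first
 * sizeof(unsigned int) bytes hold the sample step (1 << prof_shift),
 * followed by prof_len raw counters, one per sample_step bytes of
 * kernel text. readprofile(1) pairs these counters with System.map
 * to attribute ticks to kernel symbols.
 */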
/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
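/*
 * Illustrative usage (assumed, not from this file): "readprofile -r"
 * performs the reset write above, and "readprofile -M 2" writes a
 * 4-byte multiplier to double the profiling interrupt frequency on
 * architectures that implement setup_profiling_timer().
 */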
static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif
int create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */