/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <asm/sections.h>
#include <asm/semaphore.h>
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
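/*
 * Each per-cpu hashtable occupies one page; e.g. with 4KB pages and
 * 8-byte entries that is 512 entries, probed in 64 groups of 8.
 */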
/* Oprofile timer tick hook */
int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on __read_mostly;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
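/*
 * Boot-time configuration: "profile=<shift>" enables cpu-time profiling,
 * "profile=schedule[,<shift>]" profiles schedule() calls instead.  Each
 * profile bucket covers 2^prof_shift bytes of kernel text.
 */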
static int __init profile_setup(char *str)
{
	static char __initdata schedstr[] = "schedule";
	int par;

	if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

void __init profile_init(void)
{
	if (!prof_on)
		return;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
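	/* e.g. 8MB of text with prof_shift == 2 yields 2M counters */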
	prof_buffer = alloc_bootmem(prof_len * sizeof(atomic_t));
}
/* Profile event notifications */

#ifdef CONFIG_PROFILING

static DECLARE_RWSEM(profile_rwsem);
static DEFINE_RWLOCK(handoff_lock);
static struct notifier_block *task_exit_notifier;
static struct notifier_block *task_free_notifier;
static struct notifier_block *munmap_notifier;
void profile_task_exit(struct task_struct *task)
{
	down_read(&profile_rwsem);
	notifier_call_chain(&task_exit_notifier, 0, task);
	up_read(&profile_rwsem);
}
int profile_handoff_task(struct task_struct *task)
{
	int ret;

	read_lock(&handoff_lock);
	ret = notifier_call_chain(&task_free_notifier, 0, task);
	read_unlock(&handoff_lock);
	return (ret == NOTIFY_OK) ? 1 : 0;
}
void profile_munmap(unsigned long addr)
{
	down_read(&profile_rwsem);
	notifier_call_chain(&munmap_notifier, 0, (void *)addr);
	up_read(&profile_rwsem);
}
int task_handoff_register(struct notifier_block *n)
{
	int err;

	write_lock(&handoff_lock);
	err = notifier_chain_register(&task_free_notifier, n);
	write_unlock(&handoff_lock);
	return err;
}
int task_handoff_unregister(struct notifier_block *n)
{
	int err;

	write_lock(&handoff_lock);
	err = notifier_chain_unregister(&task_free_notifier, n);
	write_unlock(&handoff_lock);
	return err;
}
int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	down_write(&profile_rwsem);

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = notifier_chain_register(&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = notifier_chain_register(&munmap_notifier, n);
		break;
	}

	up_write(&profile_rwsem);
	return err;
}

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	down_write(&profile_rwsem);

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = notifier_chain_unregister(&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = notifier_chain_unregister(&munmap_notifier, n);
		break;
	}

	up_write(&profile_rwsem);
	return err;
}
int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(register_timer_hook);
EXPORT_SYMBOL_GPL(unregister_timer_hook);
EXPORT_SYMBOL_GPL(task_handoff_register);
EXPORT_SYMBOL_GPL(task_handoff_unregister);
#endif /* CONFIG_PROFILING */

EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of the second hashtable is to avoid cacheline contention
 * that would otherwise happen during flushes of pending profile hits
 * required for the accuracy of reported profile hits and so
 * resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING profile_hit() may be called from process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
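/*
 * Example: a hot pc that lands in one bucket can accumulate thousands
 * of ticks in a single hashtable entry between flushes, so the flush
 * costs one atomic_add() where the naive scheme paid one atomic_inc()
 * per tick.
 */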
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		/* drain the half that all cpus just stopped using */
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT * sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}
void profile_hit(int type, void *__pc)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
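	/*
	 * Open-address probe: start at the primary group; on collision,
	 * step by the pc-derived secondary stride (an odd multiple of
	 * the group size, so every group is visited) until we wrap back
	 * to primary.
	 */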
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits++;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = 1;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);
	/*
	 * Table is full: account this hit and all pending ones directly
	 * to prof_buffer with atomic ops, then empty the hashtable.
	 */
	atomic_inc(&prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
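/*
 * On UP there is no cross-cpu cacheline contention to amortize, so
 * hits are accounted directly to prof_buffer.
 */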
void profile_hit(int type, void *__pc)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
		return;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of this data.
 */
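/*
 * Layout: the first sizeof(unsigned int) bytes hold the sample step
 * (1 << prof_shift); the prof_len bucket counters follow as raw
 * unsigned ints.
 */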
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		put_user(*((char *)(&sample_step)+p), buf);
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also resets the
 * profiling interrupt frequency, on architectures that support this.
 */
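/*
 * E.g. readprofile(1) issues a plain write to clear the buffer; only
 * a write of exactly sizeof(int) bytes is treated as a multiplier.
 */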
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
static struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};
#ifdef CONFIG_SMP
static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	/* disable profiling, then let other cpus notice before freeing */
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif
static int __init create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -1;
	if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
		return 0;
	entry->proc_fops = &proc_profile_operations;
	entry->size = (1 + prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */