kernel/trace/trace_ksym.c
/*
 * trace_ksym.c - Kernel Symbol Tracer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2009
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace_output.h"
#include "trace.h"

#include <linux/hw_breakpoint.h>
#include <asm/hw_breakpoint.h>

#include <asm/atomic.h>
/*
 * For now, let us restrict the no. of symbols traced simultaneously to
 * number of available hardware breakpoint registers.
 */
#define KSYM_TRACER_MAX HBP_NUM

#define KSYM_TRACER_OP_LEN 3 /* rw- */
struct trace_ksym {
        struct perf_event       **ksym_hbp;
        struct perf_event_attr  attr;
#ifdef CONFIG_PROFILE_KSYM_TRACER
        atomic64_t              counter;
#endif
        struct hlist_node       ksym_hlist;
};
static struct trace_array *ksym_trace_array;

static unsigned int ksym_filter_entry_count;
static unsigned int ksym_tracing_enabled;

static HLIST_HEAD(ksym_filter_head);

static DEFINE_MUTEX(ksym_tracer_mutex);
#ifdef CONFIG_PROFILE_KSYM_TRACER

#define MAX_UL_INT 0xffffffff
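
/*
 * ksym_collect_stats - bump the per-symbol hit counter
 *
 * Called from the breakpoint handler with the address that triggered the
 * breakpoint; walks the filter list under RCU and increments the counter of
 * the matching entry. Only compiled in when CONFIG_PROFILE_KSYM_TRACER is set.
 */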
void ksym_collect_stats(unsigned long hbp_hit_addr)
{
        struct hlist_node *node;
        struct trace_ksym *entry;

        rcu_read_lock();
        hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
                if (entry->attr.bp_addr == hbp_hit_addr) {
                        atomic64_inc(&entry->counter);
                        break;
                }
        }
        rcu_read_unlock();
}
#endif /* CONFIG_PROFILE_KSYM_TRACER */
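
/*
 * ksym_hbp_handler - hardware breakpoint callback
 *
 * Runs whenever one of the registered breakpoints fires. Reserves a
 * TRACE_KSYM event in the ring buffer and records the faulting instruction
 * pointer, the access type, the breakpoint address and current->comm, then
 * (optionally) updates the profiling counter for the symbol.
 */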
void ksym_hbp_handler(struct perf_event *hbp, int nmi,
                      struct perf_sample_data *data,
                      struct pt_regs *regs)
{
        struct ring_buffer_event *event;
        struct ksym_trace_entry *entry;
        struct ring_buffer *buffer;
        int pc;

        if (!ksym_tracing_enabled)
                return;

        buffer = ksym_trace_array->buffer;

        pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_KSYM,
                                          sizeof(*entry), 0, pc);
        if (!event)
                return;

        entry           = ring_buffer_event_data(event);
        entry->ip       = instruction_pointer(regs);
        entry->type     = hw_breakpoint_type(hbp);
        entry->addr     = hw_breakpoint_addr(hbp);
        strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);

#ifdef CONFIG_PROFILE_KSYM_TRACER
        ksym_collect_stats(hw_breakpoint_addr(hbp));
#endif /* CONFIG_PROFILE_KSYM_TRACER */

        trace_buffer_unlock_commit(buffer, event, 0, pc);
}
/* Valid access types are represented as
 *
 * rw- : Set Read/Write Access Breakpoint
 * -w- : Set Write Access Breakpoint
 * --- : Clear Breakpoints
 * --x : Set Execution Break points (Not available yet)
 *
 */
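/*
 * Sketch of the mapping performed below: "r--" yields HW_BREAKPOINT_R,
 * "-w-" yields HW_BREAKPOINT_W and "rw-" yields
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W; any other combination (including the
 * not-yet-supported "--x") makes this helper return -EINVAL.
 */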
static int ksym_trace_get_access_type(char *str)
{
        int access = 0;

        if (str[0] == 'r')
                access |= HW_BREAKPOINT_R;

        if (str[1] == 'w')
                access |= HW_BREAKPOINT_W;

        if (str[2] == 'x')
                access |= HW_BREAKPOINT_X;

        switch (access) {
        case HW_BREAKPOINT_R:
        case HW_BREAKPOINT_W:
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
                return access;
        default:
                return -EINVAL;
        }
}
/*
 * There can be several possible malformed requests and we attempt to capture
 * all of them. We enumerate some of the rules:
 * 1. We will not allow kernel symbols with ':' since it is used as a delimiter.
 *    i.e. multiple ':' symbols disallowed. Possible uses are of the form
 *    <module>:<ksym_name>:<op>.
 * 2. No delimiter symbol ':' in the input string
 * 3. Spurious operator symbols or symbols not in their respective positions
 * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file
 * 5. Kernel symbol not a part of /proc/kallsyms
 * 6. Duplicate requests
 */
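/*
 * Example of a well-formed request (the symbol name is illustrative, and
 * debugfs is assumed to be mounted at /sys/kernel/debug):
 *
 *   echo "pid_max:rw-" > /sys/kernel/debug/tracing/ksym_trace_filter
 *
 * parse_ksym_trace_str() splits the string at the ':' into the symbol name
 * (resolved through kallsyms_lookup_name()) and the 3-character op string.
 */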
static int parse_ksym_trace_str(char *input_string, char **ksymname,
                                unsigned long *addr)
{
        int ret;

        *ksymname = strsep(&input_string, ":");
        *addr = kallsyms_lookup_name(*ksymname);

        /* Check for malformed request: (2), (1) and (5) */
        if ((!input_string) ||
            (strlen(input_string) != KSYM_TRACER_OP_LEN) ||
            (*addr == 0))
                return -EINVAL;

        ret = ksym_trace_get_access_type(input_string);

        return ret;
}
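
/*
 * process_new_ksym_entry - register a breakpoint for a new symbol
 *
 * Allocates a trace_ksym entry, registers a wide (per-CPU) hardware
 * breakpoint for the given address and access type and adds the entry to
 * ksym_filter_head. Returns 0 on success, -ENOSPC once KSYM_TRACER_MAX
 * entries exist, or the error from breakpoint registration. Called with
 * ksym_tracer_mutex held.
 */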
int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
{
        struct trace_ksym *entry;
        int ret = -ENOMEM;

        if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
                printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
                       " new requests for tracing can be accepted now.\n",
                       KSYM_TRACER_MAX);
                return -ENOSPC;
        }

        entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        hw_breakpoint_init(&entry->attr);

        entry->attr.bp_type = op;
        entry->attr.bp_addr = addr;
        entry->attr.bp_len = HW_BREAKPOINT_LEN_4;

        entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr,
                                        ksym_hbp_handler);

        if (IS_ERR(entry->ksym_hbp)) {
                ret = PTR_ERR(entry->ksym_hbp);
                printk(KERN_INFO "ksym_tracer request failed. Try again"
                       " later!!\n");
                goto err;
        }

        hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
        ksym_filter_entry_count++;

        return 0;

err:
        kfree(entry);

        return ret;
}
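
/*
 * ksym_trace_filter_read - list the currently filtered symbols
 *
 * Formats one line per registered entry (the symbol via %pS followed by its
 * "rw-" style op string) into a temporary trace_seq and copies the result to
 * userspace.
 */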
static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
                                      size_t count, loff_t *ppos)
{
        struct trace_ksym *entry;
        struct hlist_node *node;
        struct trace_seq *s;
        ssize_t cnt = 0;
        int ret;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
        trace_seq_init(s);

        mutex_lock(&ksym_tracer_mutex);

        hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
                ret = trace_seq_printf(s, "%pS:",
                                (void *)(unsigned long)entry->attr.bp_addr);
                if (entry->attr.bp_type == HW_BREAKPOINT_R)
                        ret = trace_seq_puts(s, "r--\n");
                else if (entry->attr.bp_type == HW_BREAKPOINT_W)
                        ret = trace_seq_puts(s, "-w-\n");
                else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R))
                        ret = trace_seq_puts(s, "rw-\n");
                WARN_ON_ONCE(!ret);
        }

        cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

        mutex_unlock(&ksym_tracer_mutex);

        kfree(s);

        return cnt;
}
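
/*
 * __ksym_trace_reset - drop every registered breakpoint
 *
 * Unregisters each breakpoint, unlinks its entry from the filter list
 * (waiting for an RCU grace period before the kfree()) and decrements the
 * entry count. Used by the "clear all" filter commands and by tracer reset.
 */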
static void __ksym_trace_reset(void)
{
        struct trace_ksym *entry;
        struct hlist_node *node, *node1;

        mutex_lock(&ksym_tracer_mutex);
        hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
                                  ksym_hlist) {
                unregister_wide_hw_breakpoint(entry->ksym_hbp);
                ksym_filter_entry_count--;
                hlist_del_rcu(&(entry->ksym_hlist));
                synchronize_rcu();
                kfree(entry);
        }
        mutex_unlock(&ksym_tracer_mutex);
}
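
/*
 * ksym_trace_filter_write - handle a "<ksym_name>:<op>" request
 *
 * Covers the clear-all forms, registration of a new symbol through
 * process_new_ksym_entry(), and changing or removing the breakpoint of an
 * already-filtered symbol. Returns the number of bytes consumed on success
 * or a negative error code.
 */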
static ssize_t ksym_trace_filter_write(struct file *file,
                                       const char __user *buffer,
                                       size_t count, loff_t *ppos)
{
        struct trace_ksym *entry;
        struct hlist_node *node;
        char *buf, *input_string, *ksymname = NULL;
        unsigned long ksym_addr = 0;
        int ret, op, changed = 0;

        buf = kzalloc(count + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = -EFAULT;
        if (copy_from_user(buf, buffer, count))
                goto out;

        buf[count] = '\0';
        input_string = strstrip(buf);

        /*
         * Clear all breakpoints if:
         * 1: echo > ksym_trace_filter
         * 2: echo 0 > ksym_trace_filter
         * 3: echo "*:---" > ksym_trace_filter
         */
        if (!input_string[0] || !strcmp(input_string, "0") ||
            !strcmp(input_string, "*:---")) {
                __ksym_trace_reset();
                ret = 0;
                goto out;
        }

        ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
        if (ret < 0)
                goto out;

        mutex_lock(&ksym_tracer_mutex);

        ret = -EINVAL;
        hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
                if (entry->attr.bp_addr == ksym_addr) {
                        /* Check for malformed request: (6) */
                        if (entry->attr.bp_type != op)
                                changed = 1;
                        else
                                goto out_unlock;
                        break;
                }
        }
        if (changed) {
                unregister_wide_hw_breakpoint(entry->ksym_hbp);
                entry->attr.bp_type = op;
                ret = 0;
                if (op > 0) {
                        entry->ksym_hbp =
                                register_wide_hw_breakpoint(&entry->attr,
                                        ksym_hbp_handler);
                        if (IS_ERR(entry->ksym_hbp))
                                ret = PTR_ERR(entry->ksym_hbp);
                        else
                                goto out_unlock;
                }
                /* Error or "symbol:---" case: drop it */
                ksym_filter_entry_count--;
                hlist_del_rcu(&(entry->ksym_hlist));
                synchronize_rcu();
                kfree(entry);
                goto out_unlock;
        } else {
                /* Check for malformed request: (4) */
                if (op)
                        ret = process_new_ksym_entry(ksymname, op, ksym_addr);
        }
out_unlock:
        mutex_unlock(&ksym_tracer_mutex);
out:
        kfree(buf);
        return !ret ? count : ret;
}
static const struct file_operations ksym_tracing_fops = {
        .open           = tracing_open_generic,
        .read           = ksym_trace_filter_read,
        .write          = ksym_trace_filter_write,
};
static void ksym_trace_reset(struct trace_array *tr)
{
        ksym_tracing_enabled = 0;
        __ksym_trace_reset();
}
static int ksym_trace_init(struct trace_array *tr)
{
        int cpu, ret = 0;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
        ksym_tracing_enabled = 1;
        ksym_trace_array = tr;

        return ret;
}
static void ksym_trace_print_header(struct seq_file *m)
{
        seq_puts(m,
                 "#       TASK-PID   CPU#      Symbol                    "
                 "Type    Function\n");
        seq_puts(m,
                 "#          |        |          |                       "
                 "    |         |\n");
}
static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct ksym_trace_entry *field;
        char str[KSYM_SYMBOL_LEN];
        int ret;

        if (entry->type != TRACE_KSYM)
                return TRACE_TYPE_UNHANDLED;

        trace_assign_type(field, entry);

        ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd,
                               entry->pid, iter->cpu, (char *)field->addr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (field->type) {
        case HW_BREAKPOINT_R:
                ret = trace_seq_printf(s, " R  ");
                break;
        case HW_BREAKPOINT_W:
                ret = trace_seq_printf(s, " W  ");
                break;
        case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
                ret = trace_seq_printf(s, " RW  ");
                break;
        default:
                return TRACE_TYPE_PARTIAL_LINE;
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        sprint_symbol(str, field->ip);
        ret = trace_seq_printf(s, "%s\n", str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
struct tracer ksym_tracer __read_mostly =
{
        .name           = "ksym_tracer",
        .init           = ksym_trace_init,
        .reset          = ksym_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_ksym,
#endif
        .print_header   = ksym_trace_print_header,
        .print_line     = ksym_trace_output
};
#ifdef CONFIG_PROFILE_KSYM_TRACER
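/*
 * ksym_profile_show - seq_file callback behind the "ksym_profile" file
 *
 * Emits one line per filtered symbol with its access type and hit count,
 * roughly of the form (symbol name and count are illustrative):
 *
 *   Access Type   Symbol                                       Counter
 *   -----------   ------                                       -------
 *   RW            pid_max                                           42
 */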
static int ksym_profile_show(struct seq_file *m, void *v)
{
        struct hlist_node *node;
        struct trace_ksym *entry;
        int access_type = 0;
        char fn_name[KSYM_NAME_LEN];

        seq_puts(m, "  Access Type ");
        seq_puts(m, "  Symbol                                       Counter\n");
        seq_puts(m, "  ----------- ");
        seq_puts(m, "  ------                                       -------\n");

        rcu_read_lock();
        hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {

                access_type = entry->attr.bp_type;

                switch (access_type) {
                case HW_BREAKPOINT_R:
                        seq_puts(m, "  R           ");
                        break;
                case HW_BREAKPOINT_W:
                        seq_puts(m, "  W           ");
                        break;
                case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
                        seq_puts(m, "  RW          ");
                        break;
                default:
                        seq_puts(m, "  NA          ");
                }

                if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0)
                        seq_printf(m, "  %-36s", fn_name);
                else
                        seq_printf(m, "  %-36s", "<NA>");
                seq_printf(m, " %15llu\n",
                           (unsigned long long)atomic64_read(&entry->counter));
        }
        rcu_read_unlock();

        return 0;
}

static int ksym_profile_open(struct inode *node, struct file *file)
{
        return single_open(file, ksym_profile_show, NULL);
}

static const struct file_operations ksym_profile_fops = {
        .open           = ksym_profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif /* CONFIG_PROFILE_KSYM_TRACER */
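
/*
 * init_ksym_trace - create the debugfs control files and register the tracer
 *
 * "ksym_trace_filter" accepts the breakpoint requests described above;
 * "ksym_profile" (when CONFIG_PROFILE_KSYM_TRACER is set) exposes the
 * per-symbol hit counts.
 */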
__init static int init_ksym_trace(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("ksym_trace_filter", 0644, d_tracer,
                          NULL, &ksym_tracing_fops);

#ifdef CONFIG_PROFILE_KSYM_TRACER
        trace_create_file("ksym_profile", 0444, d_tracer,
                          NULL, &ksym_profile_fops);
#endif

        return register_tracer(&ksym_tracer);
}
device_initcall(init_ksym_trace);