Linux 2.6.26.3
[linux/fpc-iii.git] / kernel / marker.c
blob39e7596c3d9514988bf441f23a7ed9bb89e38a21
1 /*
2 * Copyright (C) 2007 Mathieu Desnoyers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/types.h>
21 #include <linux/jhash.h>
22 #include <linux/list.h>
23 #include <linux/rcupdate.h>
24 #include <linux/marker.h>
25 #include <linux/err.h>
26 #include <linux/slab.h>
28 extern struct marker __start___markers[];
29 extern struct marker __stop___markers[];
31 /* Set to 1 to enable marker debug output */
32 static const int marker_debug;
35 * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
36 * and module markers and the hash table.
38 static DEFINE_MUTEX(markers_mutex);
41 * Marker hash table, containing the active markers.
42 * Protected by module_mutex.
44 #define MARKER_HASH_BITS 6
45 #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
48 * Note about RCU :
49 * It is used to make sure every handler has finished using its private data
50 * between two consecutive operation (add or remove) on a given marker. It is
51 * also used to delay the free of multiple probes array until a quiescent state
52 * is reached.
53 * marker entries modifications are protected by the markers_mutex.
55 struct marker_entry {
56 struct hlist_node hlist;
57 char *format;
58 void (*call)(const struct marker *mdata, /* Probe wrapper */
59 void *call_private, const char *fmt, ...);
60 struct marker_probe_closure single;
61 struct marker_probe_closure *multi;
62 int refcount; /* Number of times armed. 0 if disarmed. */
63 struct rcu_head rcu;
64 void *oldptr;
65 unsigned char rcu_pending:1;
66 unsigned char ptype:1;
67 char name[0]; /* Contains name'\0'format'\0' */
70 static struct hlist_head marker_table[MARKER_TABLE_SIZE];
72 /**
73 * __mark_empty_function - Empty probe callback
74 * @probe_private: probe private data
75 * @call_private: call site private data
76 * @fmt: format string
77 * @...: variable argument list
79 * Empty callback provided as a probe to the markers. By providing this to a
80 * disabled marker, we make sure the execution flow is always valid even
81 * though the function pointer change and the marker enabling are two distinct
82 * operations that modifies the execution flow of preemptible code.
84 void __mark_empty_function(void *probe_private, void *call_private,
85 const char *fmt, va_list *args)
88 EXPORT_SYMBOL_GPL(__mark_empty_function);
91 * marker_probe_cb Callback that prepares the variable argument list for probes.
92 * @mdata: pointer of type struct marker
93 * @call_private: caller site private data
94 * @fmt: format string
95 * @...: Variable argument list.
97 * Since we do not use "typical" pointer based RCU in the 1 argument case, we
98 * need to put a full smp_rmb() in this branch. This is why we do not use
99 * rcu_dereference() for the pointer read.
101 void marker_probe_cb(const struct marker *mdata, void *call_private,
102 const char *fmt, ...)
104 va_list args;
105 char ptype;
108 * preempt_disable does two things : disabling preemption to make sure
109 * the teardown of the callbacks can be done correctly when they are in
110 * modules and they insure RCU read coherency.
112 preempt_disable();
113 ptype = mdata->ptype;
114 if (likely(!ptype)) {
115 marker_probe_func *func;
116 /* Must read the ptype before ptr. They are not data dependant,
117 * so we put an explicit smp_rmb() here. */
118 smp_rmb();
119 func = mdata->single.func;
120 /* Must read the ptr before private data. They are not data
121 * dependant, so we put an explicit smp_rmb() here. */
122 smp_rmb();
123 va_start(args, fmt);
124 func(mdata->single.probe_private, call_private, fmt, &args);
125 va_end(args);
126 } else {
127 struct marker_probe_closure *multi;
128 int i;
130 * Read mdata->ptype before mdata->multi.
132 smp_rmb();
133 multi = mdata->multi;
135 * multi points to an array, therefore accessing the array
136 * depends on reading multi. However, even in this case,
137 * we must insure that the pointer is read _before_ the array
138 * data. Same as rcu_dereference, but we need a full smp_rmb()
139 * in the fast path, so put the explicit barrier here.
141 smp_read_barrier_depends();
142 for (i = 0; multi[i].func; i++) {
143 va_start(args, fmt);
144 multi[i].func(multi[i].probe_private, call_private, fmt,
145 &args);
146 va_end(args);
149 preempt_enable();
151 EXPORT_SYMBOL_GPL(marker_probe_cb);
154 * marker_probe_cb Callback that does not prepare the variable argument list.
155 * @mdata: pointer of type struct marker
156 * @call_private: caller site private data
157 * @fmt: format string
158 * @...: Variable argument list.
160 * Should be connected to markers "MARK_NOARGS".
162 void marker_probe_cb_noarg(const struct marker *mdata,
163 void *call_private, const char *fmt, ...)
165 va_list args; /* not initialized */
166 char ptype;
168 preempt_disable();
169 ptype = mdata->ptype;
170 if (likely(!ptype)) {
171 marker_probe_func *func;
172 /* Must read the ptype before ptr. They are not data dependant,
173 * so we put an explicit smp_rmb() here. */
174 smp_rmb();
175 func = mdata->single.func;
176 /* Must read the ptr before private data. They are not data
177 * dependant, so we put an explicit smp_rmb() here. */
178 smp_rmb();
179 func(mdata->single.probe_private, call_private, fmt, &args);
180 } else {
181 struct marker_probe_closure *multi;
182 int i;
184 * Read mdata->ptype before mdata->multi.
186 smp_rmb();
187 multi = mdata->multi;
189 * multi points to an array, therefore accessing the array
190 * depends on reading multi. However, even in this case,
191 * we must insure that the pointer is read _before_ the array
192 * data. Same as rcu_dereference, but we need a full smp_rmb()
193 * in the fast path, so put the explicit barrier here.
195 smp_read_barrier_depends();
196 for (i = 0; multi[i].func; i++)
197 multi[i].func(multi[i].probe_private, call_private, fmt,
198 &args);
200 preempt_enable();
202 EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
204 static void free_old_closure(struct rcu_head *head)
206 struct marker_entry *entry = container_of(head,
207 struct marker_entry, rcu);
208 kfree(entry->oldptr);
209 /* Make sure we free the data before setting the pending flag to 0 */
210 smp_wmb();
211 entry->rcu_pending = 0;
214 static void debug_print_probes(struct marker_entry *entry)
216 int i;
218 if (!marker_debug)
219 return;
221 if (!entry->ptype) {
222 printk(KERN_DEBUG "Single probe : %p %p\n",
223 entry->single.func,
224 entry->single.probe_private);
225 } else {
226 for (i = 0; entry->multi[i].func; i++)
227 printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
228 entry->multi[i].func,
229 entry->multi[i].probe_private);
233 static struct marker_probe_closure *
234 marker_entry_add_probe(struct marker_entry *entry,
235 marker_probe_func *probe, void *probe_private)
237 int nr_probes = 0;
238 struct marker_probe_closure *old, *new;
240 WARN_ON(!probe);
242 debug_print_probes(entry);
243 old = entry->multi;
244 if (!entry->ptype) {
245 if (entry->single.func == probe &&
246 entry->single.probe_private == probe_private)
247 return ERR_PTR(-EBUSY);
248 if (entry->single.func == __mark_empty_function) {
249 /* 0 -> 1 probes */
250 entry->single.func = probe;
251 entry->single.probe_private = probe_private;
252 entry->refcount = 1;
253 entry->ptype = 0;
254 debug_print_probes(entry);
255 return NULL;
256 } else {
257 /* 1 -> 2 probes */
258 nr_probes = 1;
259 old = NULL;
261 } else {
262 /* (N -> N+1), (N != 0, 1) probes */
263 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
264 if (old[nr_probes].func == probe
265 && old[nr_probes].probe_private
266 == probe_private)
267 return ERR_PTR(-EBUSY);
269 /* + 2 : one for new probe, one for NULL func */
270 new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
271 GFP_KERNEL);
272 if (new == NULL)
273 return ERR_PTR(-ENOMEM);
274 if (!old)
275 new[0] = entry->single;
276 else
277 memcpy(new, old,
278 nr_probes * sizeof(struct marker_probe_closure));
279 new[nr_probes].func = probe;
280 new[nr_probes].probe_private = probe_private;
281 entry->refcount = nr_probes + 1;
282 entry->multi = new;
283 entry->ptype = 1;
284 debug_print_probes(entry);
285 return old;
288 static struct marker_probe_closure *
289 marker_entry_remove_probe(struct marker_entry *entry,
290 marker_probe_func *probe, void *probe_private)
292 int nr_probes = 0, nr_del = 0, i;
293 struct marker_probe_closure *old, *new;
295 old = entry->multi;
297 debug_print_probes(entry);
298 if (!entry->ptype) {
299 /* 0 -> N is an error */
300 WARN_ON(entry->single.func == __mark_empty_function);
301 /* 1 -> 0 probes */
302 WARN_ON(probe && entry->single.func != probe);
303 WARN_ON(entry->single.probe_private != probe_private);
304 entry->single.func = __mark_empty_function;
305 entry->refcount = 0;
306 entry->ptype = 0;
307 debug_print_probes(entry);
308 return NULL;
309 } else {
310 /* (N -> M), (N > 1, M >= 0) probes */
311 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
312 if ((!probe || old[nr_probes].func == probe)
313 && old[nr_probes].probe_private
314 == probe_private)
315 nr_del++;
319 if (nr_probes - nr_del == 0) {
320 /* N -> 0, (N > 1) */
321 entry->single.func = __mark_empty_function;
322 entry->refcount = 0;
323 entry->ptype = 0;
324 } else if (nr_probes - nr_del == 1) {
325 /* N -> 1, (N > 1) */
326 for (i = 0; old[i].func; i++)
327 if ((probe && old[i].func != probe) ||
328 old[i].probe_private != probe_private)
329 entry->single = old[i];
330 entry->refcount = 1;
331 entry->ptype = 0;
332 } else {
333 int j = 0;
334 /* N -> M, (N > 1, M > 1) */
335 /* + 1 for NULL */
336 new = kzalloc((nr_probes - nr_del + 1)
337 * sizeof(struct marker_probe_closure), GFP_KERNEL);
338 if (new == NULL)
339 return ERR_PTR(-ENOMEM);
340 for (i = 0; old[i].func; i++)
341 if ((probe && old[i].func != probe) ||
342 old[i].probe_private != probe_private)
343 new[j++] = old[i];
344 entry->refcount = nr_probes - nr_del;
345 entry->ptype = 1;
346 entry->multi = new;
348 debug_print_probes(entry);
349 return old;
353 * Get marker if the marker is present in the marker hash table.
354 * Must be called with markers_mutex held.
355 * Returns NULL if not present.
357 static struct marker_entry *get_marker(const char *name)
359 struct hlist_head *head;
360 struct hlist_node *node;
361 struct marker_entry *e;
362 u32 hash = jhash(name, strlen(name), 0);
364 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
365 hlist_for_each_entry(e, node, head, hlist) {
366 if (!strcmp(name, e->name))
367 return e;
369 return NULL;
373 * Add the marker to the marker hash table. Must be called with markers_mutex
374 * held.
376 static struct marker_entry *add_marker(const char *name, const char *format)
378 struct hlist_head *head;
379 struct hlist_node *node;
380 struct marker_entry *e;
381 size_t name_len = strlen(name) + 1;
382 size_t format_len = 0;
383 u32 hash = jhash(name, name_len-1, 0);
385 if (format)
386 format_len = strlen(format) + 1;
387 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
388 hlist_for_each_entry(e, node, head, hlist) {
389 if (!strcmp(name, e->name)) {
390 printk(KERN_NOTICE
391 "Marker %s busy\n", name);
392 return ERR_PTR(-EBUSY); /* Already there */
396 * Using kmalloc here to allocate a variable length element. Could
397 * cause some memory fragmentation if overused.
399 e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
400 GFP_KERNEL);
401 if (!e)
402 return ERR_PTR(-ENOMEM);
403 memcpy(&e->name[0], name, name_len);
404 if (format) {
405 e->format = &e->name[name_len];
406 memcpy(e->format, format, format_len);
407 if (strcmp(e->format, MARK_NOARGS) == 0)
408 e->call = marker_probe_cb_noarg;
409 else
410 e->call = marker_probe_cb;
411 trace_mark(core_marker_format, "name %s format %s",
412 e->name, e->format);
413 } else {
414 e->format = NULL;
415 e->call = marker_probe_cb;
417 e->single.func = __mark_empty_function;
418 e->single.probe_private = NULL;
419 e->multi = NULL;
420 e->ptype = 0;
421 e->refcount = 0;
422 e->rcu_pending = 0;
423 hlist_add_head(&e->hlist, head);
424 return e;
428 * Remove the marker from the marker hash table. Must be called with mutex_lock
429 * held.
431 static int remove_marker(const char *name)
433 struct hlist_head *head;
434 struct hlist_node *node;
435 struct marker_entry *e;
436 int found = 0;
437 size_t len = strlen(name) + 1;
438 u32 hash = jhash(name, len-1, 0);
440 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
441 hlist_for_each_entry(e, node, head, hlist) {
442 if (!strcmp(name, e->name)) {
443 found = 1;
444 break;
447 if (!found)
448 return -ENOENT;
449 if (e->single.func != __mark_empty_function)
450 return -EBUSY;
451 hlist_del(&e->hlist);
452 /* Make sure the call_rcu has been executed */
453 if (e->rcu_pending)
454 rcu_barrier();
455 kfree(e);
456 return 0;
460 * Set the mark_entry format to the format found in the element.
462 static int marker_set_format(struct marker_entry **entry, const char *format)
464 struct marker_entry *e;
465 size_t name_len = strlen((*entry)->name) + 1;
466 size_t format_len = strlen(format) + 1;
469 e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
470 GFP_KERNEL);
471 if (!e)
472 return -ENOMEM;
473 memcpy(&e->name[0], (*entry)->name, name_len);
474 e->format = &e->name[name_len];
475 memcpy(e->format, format, format_len);
476 if (strcmp(e->format, MARK_NOARGS) == 0)
477 e->call = marker_probe_cb_noarg;
478 else
479 e->call = marker_probe_cb;
480 e->single = (*entry)->single;
481 e->multi = (*entry)->multi;
482 e->ptype = (*entry)->ptype;
483 e->refcount = (*entry)->refcount;
484 e->rcu_pending = 0;
485 hlist_add_before(&e->hlist, &(*entry)->hlist);
486 hlist_del(&(*entry)->hlist);
487 /* Make sure the call_rcu has been executed */
488 if ((*entry)->rcu_pending)
489 rcu_barrier();
490 kfree(*entry);
491 *entry = e;
492 trace_mark(core_marker_format, "name %s format %s",
493 e->name, e->format);
494 return 0;
498 * Sets the probe callback corresponding to one marker.
500 static int set_marker(struct marker_entry **entry, struct marker *elem,
501 int active)
503 int ret;
504 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
506 if ((*entry)->format) {
507 if (strcmp((*entry)->format, elem->format) != 0) {
508 printk(KERN_NOTICE
509 "Format mismatch for probe %s "
510 "(%s), marker (%s)\n",
511 (*entry)->name,
512 (*entry)->format,
513 elem->format);
514 return -EPERM;
516 } else {
517 ret = marker_set_format(entry, elem->format);
518 if (ret)
519 return ret;
523 * probe_cb setup (statically known) is done here. It is
524 * asynchronous with the rest of execution, therefore we only
525 * pass from a "safe" callback (with argument) to an "unsafe"
526 * callback (does not set arguments).
528 elem->call = (*entry)->call;
530 * Sanity check :
531 * We only update the single probe private data when the ptr is
532 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
534 WARN_ON(elem->single.func != __mark_empty_function
535 && elem->single.probe_private
536 != (*entry)->single.probe_private &&
537 !elem->ptype);
538 elem->single.probe_private = (*entry)->single.probe_private;
540 * Make sure the private data is valid when we update the
541 * single probe ptr.
543 smp_wmb();
544 elem->single.func = (*entry)->single.func;
546 * We also make sure that the new probe callbacks array is consistent
547 * before setting a pointer to it.
549 rcu_assign_pointer(elem->multi, (*entry)->multi);
551 * Update the function or multi probe array pointer before setting the
552 * ptype.
554 smp_wmb();
555 elem->ptype = (*entry)->ptype;
556 elem->state = active;
558 return 0;
562 * Disable a marker and its probe callback.
563 * Note: only waiting an RCU period after setting elem->call to the empty
564 * function insures that the original callback is not used anymore. This insured
565 * by preempt_disable around the call site.
567 static void disable_marker(struct marker *elem)
569 /* leave "call" as is. It is known statically. */
570 elem->state = 0;
571 elem->single.func = __mark_empty_function;
572 /* Update the function before setting the ptype */
573 smp_wmb();
574 elem->ptype = 0; /* single probe */
576 * Leave the private data and id there, because removal is racy and
577 * should be done only after an RCU period. These are never used until
578 * the next initialization anyway.
583 * marker_update_probe_range - Update a probe range
584 * @begin: beginning of the range
585 * @end: end of the range
587 * Updates the probe callback corresponding to a range of markers.
589 void marker_update_probe_range(struct marker *begin,
590 struct marker *end)
592 struct marker *iter;
593 struct marker_entry *mark_entry;
595 mutex_lock(&markers_mutex);
596 for (iter = begin; iter < end; iter++) {
597 mark_entry = get_marker(iter->name);
598 if (mark_entry) {
599 set_marker(&mark_entry, iter,
600 !!mark_entry->refcount);
602 * ignore error, continue
604 } else {
605 disable_marker(iter);
608 mutex_unlock(&markers_mutex);
612 * Update probes, removing the faulty probes.
614 * Internal callback only changed before the first probe is connected to it.
615 * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
616 * transitions. All other transitions will leave the old private data valid.
617 * This makes the non-atomicity of the callback/private data updates valid.
619 * "special case" updates :
620 * 0 -> 1 callback
621 * 1 -> 0 callback
622 * 1 -> 2 callbacks
623 * 2 -> 1 callbacks
624 * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
625 * Site effect : marker_set_format may delete the marker entry (creating a
626 * replacement).
628 static void marker_update_probes(void)
630 /* Core kernel markers */
631 marker_update_probe_range(__start___markers, __stop___markers);
632 /* Markers in modules. */
633 module_update_markers();
637 * marker_probe_register - Connect a probe to a marker
638 * @name: marker name
639 * @format: format string
640 * @probe: probe handler
641 * @probe_private: probe private data
643 * private data must be a valid allocated memory address, or NULL.
644 * Returns 0 if ok, error value on error.
645 * The probe address must at least be aligned on the architecture pointer size.
647 int marker_probe_register(const char *name, const char *format,
648 marker_probe_func *probe, void *probe_private)
650 struct marker_entry *entry;
651 int ret = 0;
652 struct marker_probe_closure *old;
654 mutex_lock(&markers_mutex);
655 entry = get_marker(name);
656 if (!entry) {
657 entry = add_marker(name, format);
658 if (IS_ERR(entry)) {
659 ret = PTR_ERR(entry);
660 goto end;
664 * If we detect that a call_rcu is pending for this marker,
665 * make sure it's executed now.
667 if (entry->rcu_pending)
668 rcu_barrier();
669 old = marker_entry_add_probe(entry, probe, probe_private);
670 if (IS_ERR(old)) {
671 ret = PTR_ERR(old);
672 goto end;
674 mutex_unlock(&markers_mutex);
675 marker_update_probes(); /* may update entry */
676 mutex_lock(&markers_mutex);
677 entry = get_marker(name);
678 WARN_ON(!entry);
679 entry->oldptr = old;
680 entry->rcu_pending = 1;
681 /* write rcu_pending before calling the RCU callback */
682 smp_wmb();
683 #ifdef CONFIG_PREEMPT_RCU
684 synchronize_sched(); /* Until we have the call_rcu_sched() */
685 #endif
686 call_rcu(&entry->rcu, free_old_closure);
687 end:
688 mutex_unlock(&markers_mutex);
689 return ret;
691 EXPORT_SYMBOL_GPL(marker_probe_register);
694 * marker_probe_unregister - Disconnect a probe from a marker
695 * @name: marker name
696 * @probe: probe function pointer
697 * @probe_private: probe private data
699 * Returns the private data given to marker_probe_register, or an ERR_PTR().
700 * We do not need to call a synchronize_sched to make sure the probes have
701 * finished running before doing a module unload, because the module unload
702 * itself uses stop_machine(), which insures that every preempt disabled section
703 * have finished.
705 int marker_probe_unregister(const char *name,
706 marker_probe_func *probe, void *probe_private)
708 struct marker_entry *entry;
709 struct marker_probe_closure *old;
710 int ret = -ENOENT;
712 mutex_lock(&markers_mutex);
713 entry = get_marker(name);
714 if (!entry)
715 goto end;
716 if (entry->rcu_pending)
717 rcu_barrier();
718 old = marker_entry_remove_probe(entry, probe, probe_private);
719 mutex_unlock(&markers_mutex);
720 marker_update_probes(); /* may update entry */
721 mutex_lock(&markers_mutex);
722 entry = get_marker(name);
723 if (!entry)
724 goto end;
725 entry->oldptr = old;
726 entry->rcu_pending = 1;
727 /* write rcu_pending before calling the RCU callback */
728 smp_wmb();
729 #ifdef CONFIG_PREEMPT_RCU
730 synchronize_sched(); /* Until we have the call_rcu_sched() */
731 #endif
732 call_rcu(&entry->rcu, free_old_closure);
733 remove_marker(name); /* Ignore busy error message */
734 ret = 0;
735 end:
736 mutex_unlock(&markers_mutex);
737 return ret;
739 EXPORT_SYMBOL_GPL(marker_probe_unregister);
741 static struct marker_entry *
742 get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
744 struct marker_entry *entry;
745 unsigned int i;
746 struct hlist_head *head;
747 struct hlist_node *node;
749 for (i = 0; i < MARKER_TABLE_SIZE; i++) {
750 head = &marker_table[i];
751 hlist_for_each_entry(entry, node, head, hlist) {
752 if (!entry->ptype) {
753 if (entry->single.func == probe
754 && entry->single.probe_private
755 == probe_private)
756 return entry;
757 } else {
758 struct marker_probe_closure *closure;
759 closure = entry->multi;
760 for (i = 0; closure[i].func; i++) {
761 if (closure[i].func == probe &&
762 closure[i].probe_private
763 == probe_private)
764 return entry;
769 return NULL;
773 * marker_probe_unregister_private_data - Disconnect a probe from a marker
774 * @probe: probe function
775 * @probe_private: probe private data
777 * Unregister a probe by providing the registered private data.
778 * Only removes the first marker found in hash table.
779 * Return 0 on success or error value.
780 * We do not need to call a synchronize_sched to make sure the probes have
781 * finished running before doing a module unload, because the module unload
782 * itself uses stop_machine(), which insures that every preempt disabled section
783 * have finished.
785 int marker_probe_unregister_private_data(marker_probe_func *probe,
786 void *probe_private)
788 struct marker_entry *entry;
789 int ret = 0;
790 struct marker_probe_closure *old;
792 mutex_lock(&markers_mutex);
793 entry = get_marker_from_private_data(probe, probe_private);
794 if (!entry) {
795 ret = -ENOENT;
796 goto end;
798 if (entry->rcu_pending)
799 rcu_barrier();
800 old = marker_entry_remove_probe(entry, NULL, probe_private);
801 mutex_unlock(&markers_mutex);
802 marker_update_probes(); /* may update entry */
803 mutex_lock(&markers_mutex);
804 entry = get_marker_from_private_data(probe, probe_private);
805 WARN_ON(!entry);
806 entry->oldptr = old;
807 entry->rcu_pending = 1;
808 /* write rcu_pending before calling the RCU callback */
809 smp_wmb();
810 #ifdef CONFIG_PREEMPT_RCU
811 synchronize_sched(); /* Until we have the call_rcu_sched() */
812 #endif
813 call_rcu(&entry->rcu, free_old_closure);
814 remove_marker(entry->name); /* Ignore busy error message */
815 end:
816 mutex_unlock(&markers_mutex);
817 return ret;
819 EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
822 * marker_get_private_data - Get a marker's probe private data
823 * @name: marker name
824 * @probe: probe to match
825 * @num: get the nth matching probe's private data
827 * Returns the nth private data pointer (starting from 0) matching, or an
828 * ERR_PTR.
829 * Returns the private data pointer, or an ERR_PTR.
830 * The private data pointer should _only_ be dereferenced if the caller is the
831 * owner of the data, or its content could vanish. This is mostly used to
832 * confirm that a caller is the owner of a registered probe.
834 void *marker_get_private_data(const char *name, marker_probe_func *probe,
835 int num)
837 struct hlist_head *head;
838 struct hlist_node *node;
839 struct marker_entry *e;
840 size_t name_len = strlen(name) + 1;
841 u32 hash = jhash(name, name_len-1, 0);
842 int i;
844 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
845 hlist_for_each_entry(e, node, head, hlist) {
846 if (!strcmp(name, e->name)) {
847 if (!e->ptype) {
848 if (num == 0 && e->single.func == probe)
849 return e->single.probe_private;
850 else
851 break;
852 } else {
853 struct marker_probe_closure *closure;
854 int match = 0;
855 closure = e->multi;
856 for (i = 0; closure[i].func; i++) {
857 if (closure[i].func != probe)
858 continue;
859 if (match++ == num)
860 return closure[i].probe_private;
865 return ERR_PTR(-ENOENT);
867 EXPORT_SYMBOL_GPL(marker_get_private_data);