/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - per-thread and per-cpu allocation of BTS and PEBS
 * - buffer memory allocation (optional)
 * - buffer overflow handling
 *
 * It assumes:
 * - get_task_struct on all parameter tasks
 * - current is allowed to trace parameter tasks
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the size of the DS structure in bytes */
	unsigned char  sizeof_ds;
	/* the size of one pointer-typed field in the DS structure in
	   bytes; this covers the first 8 fields related to buffer
	   management. */
	unsigned char  sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char  sizeof_rec[2];
};
static struct ds_configuration ds_cfg;

/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into the BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into the PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which the counter is reset following a counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */
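
/*
 * Example, assuming the 64bit layout (sizeof_field == 8): the DS area
 * starts with the 4 BTS fields, followed by the 4 PEBS fields, so a
 * field lives at
 *
 *	base + sizeof_field * (field + 4 * qualifier)
 *
 * e.g. the PEBS write pointer (field 1, qualifier 1) is at offset
 * 8 * (1 + 4 * 1) = 40 bytes, and the PEBS counter reset value follows
 * the 8 pointer fields at offset 8 * 8 = 64 bytes.
 */
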
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};

static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}

static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}

/*
 * Locking is done only for allocating BTS or PEBS resources and for
 * guarding context and buffer memory allocation.
 *
 * Most functions require the current task to own the ds context part
 * they are going to access. All the locking is done when validating
 * access to the context.
 */
static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);

/*
 * Validate that the current task is allowed to access the BTS/PEBS
 * buffer of the parameter task.
 *
 * Returns 0, if access is granted; a negative errno, otherwise.
 */
static inline int ds_validate_access(struct ds_context *context,
				     enum ds_qualifier qual)
{
	if (!context)
		return -EPERM;

	if (context->owner[qual] == current)
		return 0;

	return -EPERM;
}

/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) negatively for one type:
 *
 *	>0  number of per-thread tracers
 *	<0  number of per-cpu tracers
 *
 * The functions below to get and put tracers and to check the
 * allocation type require the ds_lock to be held by the caller.
 *
 * The tracer count essentially gives the number of ds contexts for a
 * certain type of allocation.
 */
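
/*
 * For example: after two per-thread requests the count is +2, so a
 * per-cpu request (task == NULL) fails check_tracer(); after one
 * per-cpu request the count is -1 and only further per-cpu requests
 * pass the check until the count returns to 0.
 */
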
static long tracers;

static inline void get_tracer(struct task_struct *task)
{
	tracers += (task ? 1 : -1);
}

static inline void put_tracer(struct task_struct *task)
{
	tracers -= (task ? 1 : -1);
}

static inline int check_tracer(struct task_struct *task)
{
	return (task ? (tracers >= 0) : (tracers <= 0));
}

/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 *
 * We distinguish between an allocating and a non-allocating get of a
 * context:
 * - the allocating get is used for requesting BTS/PEBS resources. It
 *   requires the caller to hold the global ds_lock.
 * - the non-allocating get is used for all other cases. A
 *   non-existing context indicates an error. It acquires and releases
 *   the ds_lock itself for obtaining the context.
 *
 * A context and its DS configuration are allocated and deallocated
 * together. A context always has a DS configuration of the
 * appropriate size.
 */
static DEFINE_PER_CPU(struct ds_context *, system_context);

#define this_system_context per_cpu(system_context, smp_processor_id())

/*
 * Returns the pointer to the parameter task's context or to the
 * system-wide context, if task is NULL.
 *
 * Increases the use count of the returned context, if not NULL.
 */
static inline struct ds_context *ds_get_context(struct task_struct *task)
{
	struct ds_context *context;
	unsigned long irq;

	spin_lock_irqsave(&ds_lock, irq);

	context = (task ? task->thread.ds_ctx : this_system_context);
	if (context)
		context->count++;

	spin_unlock_irqrestore(&ds_lock, irq);

	return context;
}

/*
 * Same as ds_get_context, but allocates the context and its DS
 * structure, if necessary; returns NULL, if out of memory.
 */
static inline struct ds_context *ds_alloc_context(struct task_struct *task)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &this_system_context);
	struct ds_context *context = *p_context;
	unsigned long irq;

	if (!context) {
		context = kzalloc(sizeof(*context), GFP_KERNEL);
		if (!context)
			return NULL;

		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
		if (!context->ds) {
			kfree(context);
			return NULL;
		}

		spin_lock_irqsave(&ds_lock, irq);

		if (*p_context) {
			kfree(context->ds);
			kfree(context);

			context = *p_context;
		} else {
			*p_context = context;

			context->this = p_context;
			context->task = task;

			if (task)
				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

			if (!task || (task == current))
				wrmsrl(MSR_IA32_DS_AREA,
				       (unsigned long)context->ds);
		}

		spin_unlock_irqrestore(&ds_lock, irq);
	}

	context->count++;

	return context;
}

/*
 * Decreases the use count of the parameter context, if not NULL.
 * Deallocates the context, if the use count reaches zero.
 */
static inline void ds_put_context(struct ds_context *context)
{
	unsigned long irq;

	if (!context)
		return;

	spin_lock_irqsave(&ds_lock, irq);

	if (--context->count)
		goto out;

	*(context->this) = NULL;

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

	put_tracer(context->task);

	/* free any leftover buffers from tracers that did not
	 * deallocate them properly. */
	kfree(context->buffer[ds_bts]);
	kfree(context->buffer[ds_pebs]);
	kfree(context->ds);
	kfree(context);
 out:
	spin_unlock_irqrestore(&ds_lock, irq);
}

/*
 * Handle a buffer overflow
 *
 * task: the task whose buffers are overflowing;
 *       NULL for a buffer overflow on the current cpu
 * context: the ds context
 * qual: the buffer type
 */
static void ds_overflow(struct task_struct *task, struct ds_context *context,
			enum ds_qualifier qual)
{
	if (!context)
		return;

	if (context->callback[qual])
		(*context->callback[qual])(task);

	/* todo: do some more overflow handling */
}

/*
 * Allocate a non-pageable buffer of the parameter size.
 * Checks the memory and the locked memory rlimit.
 *
 * Returns the buffer, if successful;
 *         NULL, if out of memory or rlimit exceeded.
 *
 * size: the requested buffer size in bytes
 * pages (out): if not NULL, contains the number of pages reserved
 */
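
/*
 * Example, assuming 4 KiB pages: a 9 KiB request is rounded up to
 * pgsz = 3 pages, which are charged against both RLIMIT_AS (total_vm)
 * and RLIMIT_MEMLOCK (locked_vm), since the kzalloc'ed buffer is never
 * paged out.
 */
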
static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
{
	unsigned long rlim, vm, pgsz;
	void *buffer;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->total_vm + pgsz;
	if (rlim < vm)
		return NULL;

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->locked_vm + pgsz;
	if (rlim < vm)
		return NULL;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		return NULL;

	current->mm->total_vm  += pgsz;
	current->mm->locked_vm += pgsz;

	if (pages)
		*pages = pgsz;

	return buffer;
}

static int ds_request(struct task_struct *task, void *base, size_t size,
		      ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long buffer, adj;
	const unsigned long alignment = (1 << 3);
	unsigned long irq;
	int error = 0;

	if (!ds_cfg.sizeof_ds)
		return -EOPNOTSUPP;

	/* we require some space to do alignment adjustments below */
	if (size < (alignment + ds_cfg.sizeof_rec[qual]))
		return -EINVAL;

	/* buffer overflow notification is not yet implemented */
	if (ovfl)
		return -EOPNOTSUPP;

	context = ds_alloc_context(task);
	if (!context)
		return -ENOMEM;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EALREADY;
	if (context->owner[qual] == current)
		goto out_put_tracer;
	error = -EPERM;
	if (context->owner[qual] != NULL)
		goto out_put_tracer;
	context->owner[qual] = current;

	spin_unlock_irqrestore(&ds_lock, irq);

	error = -ENOMEM;
	if (!base) {
		base = ds_allocate_buffer(size, &context->pages[qual]);
		if (!base)
			goto out_release;

		context->buffer[qual] = base;
	}
	error = 0;

	context->callback[qual] = ovfl;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * We checked the size at the very beginning; we have enough
	 * space to do the adjustment.
	 */
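	/*
	 * Example: with base == 0x1003 and 8-byte alignment, adj == 5,
	 * so the usable buffer starts at 0x1008 and shrinks by 5 bytes;
	 * the size is then rounded down to a whole multiple of the
	 * record size.
	 */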
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, alignment) - buffer;
	buffer += adj;
	size   -= adj;

	size /= ds_cfg.sizeof_rec[qual];
	size *= ds_cfg.sizeof_rec[qual];

	ds_set(context->ds, qual, ds_buffer_base, buffer);
	ds_set(context->ds, qual, ds_index, buffer);
	ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);

	/* todo: select a suitable interrupt threshold */
	ds_set(context->ds, qual,
	       ds_interrupt_threshold, buffer + size + 1);

	/* we keep the context until ds_release */
	return error;

 out_release:
	context->owner[qual] = NULL;
	ds_put_context(context);
	put_tracer(task);
	return error;

 out_put_tracer:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(context);
	put_tracer(task);
	return error;

 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(context);
	return error;
}

int ds_request_bts(struct task_struct *task, void *base, size_t size,
		   ds_ovfl_callback_t ovfl)
{
	return ds_request(task, base, size, ovfl, ds_bts);
}

int ds_request_pebs(struct task_struct *task, void *base, size_t size,
		    ds_ovfl_callback_t ovfl)
{
	return ds_request(task, base, size, ovfl, ds_pebs);
}

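/*
 * Illustrative sketch of the request/access/release cycle as a caller
 * such as the ptrace BTS code would use it; the helper name and the
 * 4 KiB buffer size are made up for the example.  Passing base == NULL
 * asks ds_request() to allocate the buffer itself, and overflow
 * callbacks are not supported yet, so ovfl must be NULL.
 */
#if 0
static int ds_example_bts_snapshot(struct task_struct *task)
{
	const void *record;
	size_t end, i;
	int error;

	error = ds_request_bts(task, /* base = */ NULL, 4096,
			       /* ovfl = */ NULL);
	if (error < 0)
		return error;

	/* ... the hardware traces branches into the buffer ... */

	error = ds_get_bts_end(task, &end);
	if (error < 0)
		goto out;

	for (i = 0; i < end; i++) {
		error = ds_access_bts(task, i, &record);
		if (error < 0)
			break;
		/* record points at one raw BTS record of
		 * ds_cfg.sizeof_rec[ds_bts] bytes */
	}

 out:
	ds_release_bts(task);
	return (error < 0) ? error : 0;
}
#endif
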
static int ds_release(struct task_struct *task, enum ds_qualifier qual)
{
	struct ds_context *context;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	kfree(context->buffer[qual]);
	context->buffer[qual] = NULL;

	current->mm->total_vm  -= context->pages[qual];
	current->mm->locked_vm -= context->pages[qual];
	context->pages[qual] = 0;
	context->owner[qual] = NULL;

	/*
	 * we put the context twice:
	 *   once for the ds_get_context
	 *   once for the corresponding ds_request
	 */
	ds_put_context(context);
 out:
	ds_put_context(context);
	return error;
}

int ds_release_bts(struct task_struct *task)
{
	return ds_release(task, ds_bts);
}

int ds_release_pebs(struct task_struct *task)
{
	return ds_release(task, ds_pebs);
}

static int ds_get_index(struct task_struct *task, size_t *pos,
			enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, index;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base  = ds_get(context->ds, qual, ds_buffer_base);
	index = ds_get(context->ds, qual, ds_index);

	error = ((index - base) / ds_cfg.sizeof_rec[qual]);
	if (pos)
		*pos = error;
 out:
	ds_put_context(context);
	return error;
}

int ds_get_bts_index(struct task_struct *task, size_t *pos)
{
	return ds_get_index(task, pos, ds_bts);
}

int ds_get_pebs_index(struct task_struct *task, size_t *pos)
{
	return ds_get_index(task, pos, ds_pebs);
}

static int ds_get_end(struct task_struct *task, size_t *pos,
		      enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, end;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	end  = ds_get(context->ds, qual, ds_absolute_maximum);

	error = ((end - base) / ds_cfg.sizeof_rec[qual]);
	if (pos)
		*pos = error;
 out:
	ds_put_context(context);
	return error;
}

int ds_get_bts_end(struct task_struct *task, size_t *pos)
{
	return ds_get_end(task, pos, ds_bts);
}

int ds_get_pebs_end(struct task_struct *task, size_t *pos)
{
	return ds_get_end(task, pos, ds_pebs);
}

static int ds_access(struct task_struct *task, size_t index,
		     const void **record, enum ds_qualifier qual)
{
	struct ds_context *context;
	unsigned long base, idx;
	int error;

	if (!record)
		return -EINVAL;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	idx  = base + (index * ds_cfg.sizeof_rec[qual]);

	error = -EINVAL;
	if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
		goto out;

	*record = (const void *)idx;
	error = ds_cfg.sizeof_rec[qual];
 out:
	ds_put_context(context);
	return error;
}

int ds_access_bts(struct task_struct *task, size_t index, const void **record)
{
	return ds_access(task, index, record, ds_bts);
}

int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
{
	return ds_access(task, index, record, ds_pebs);
}

static int ds_write(struct task_struct *task, const void *record, size_t size,
		    enum ds_qualifier qual, int force)
{
	struct ds_context *context;
	int error;

	if (!record)
		return -EINVAL;

	error = -EPERM;
	context = ds_get_context(task);
	if (!context)
		goto out;

	if (!force) {
		error = ds_validate_access(context, qual);
		if (error < 0)
			goto out;
	}

	error = 0;
	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
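		/*
		 * Example: with base == 0x1000, end == 0x1600 and
		 * int_th == 0x1400, an index of 0x1480 is already past
		 * the threshold, so write_end is raised to end and up
		 * to 0x180 bytes may still be copied before the buffer
		 * is full.
		 */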
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size  -= write_size;
		error += write_size;

		adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		if (index >= int_th)
			ds_overflow(task, context, qual);
	}

 out:
	ds_put_context(context);
	return error;
}

int ds_write_bts(struct task_struct *task, const void *record, size_t size)
{
	return ds_write(task, record, size, ds_bts, /* force = */ 0);
}

int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
{
	return ds_write(task, record, size, ds_pebs, /* force = */ 0);
}

int ds_unchecked_write_bts(struct task_struct *task,
			   const void *record, size_t size)
{
	return ds_write(task, record, size, ds_bts, /* force = */ 1);
}

int ds_unchecked_write_pebs(struct task_struct *task,
			    const void *record, size_t size)
{
	return ds_write(task, record, size, ds_pebs, /* force = */ 1);
}

static int ds_reset_or_clear(struct task_struct *task,
			     enum ds_qualifier qual, int clear)
{
	struct ds_context *context;
	unsigned long base, end;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, qual);
	if (error < 0)
		goto out;

	base = ds_get(context->ds, qual, ds_buffer_base);
	end  = ds_get(context->ds, qual, ds_absolute_maximum);

	if (clear)
		memset((void *)base, 0, end - base);

	ds_set(context->ds, qual, ds_index, base);

	error = 0;
 out:
	ds_put_context(context);
	return error;
}

int ds_reset_bts(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
}

int ds_reset_pebs(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
}

int ds_clear_bts(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
}

int ds_clear_pebs(struct task_struct *task)
{
	return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
}

int ds_get_pebs_reset(struct task_struct *task, u64 *value)
{
	struct ds_context *context;
	int error;

	if (!value)
		return -EINVAL;

	context = ds_get_context(task);
	error = ds_validate_access(context, ds_pebs);
	if (error < 0)
		goto out;

	*value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));

	error = 0;
 out:
	ds_put_context(context);
	return error;
}

int ds_set_pebs_reset(struct task_struct *task, u64 value)
{
	struct ds_context *context;
	int error;

	context = ds_get_context(task);
	error = ds_validate_access(context, ds_pebs);
	if (error < 0)
		goto out;

	*(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;

	error = 0;
 out:
	ds_put_context(context);
	return error;
}

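/*
 * Illustrative sketch (the helper name and buffer size are made up): a
 * PEBS user would typically pair the buffer request with programming the
 * counter reset value that the hardware reloads after each overflow.
 */
#if 0
static int ds_example_setup_pebs(struct task_struct *task, u64 reset)
{
	int error;

	error = ds_request_pebs(task, /* base = */ NULL, 4096,
				/* ovfl = */ NULL);
	if (error < 0)
		return error;

	error = ds_set_pebs_reset(task, reset);
	if (error < 0)
		ds_release_pebs(task);

	return error;
}
#endif
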
static const struct ds_configuration ds_cfg_var = {
	.sizeof_ds    = sizeof(long) * 12,
	.sizeof_field = sizeof(long),
	.sizeof_rec[ds_bts]  = sizeof(long) * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs] = sizeof(long) * 10,
#else
	.sizeof_rec[ds_pebs] = sizeof(long) * 18,
#endif
};
static const struct ds_configuration ds_cfg_64 = {
	.sizeof_ds    = 8 * 12,
	.sizeof_field = 8,
	.sizeof_rec[ds_bts]  = 8 * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs] = 8 * 10,
#else
	.sizeof_rec[ds_pebs] = 8 * 18,
#endif
};
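
/*
 * For example, with ds_cfg_64 one BTS record is 8 * 3 = 24 bytes, so a
 * 4 KiB buffer holds 170 records (4096 / 24, rounded down).
 */
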
static inline void
ds_configure(const struct ds_configuration *cfg)
{
	ds_cfg = *cfg;
}

void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0xE: /* Pentium M */
			ds_configure(&ds_cfg_var);
			break;
		case 0xF: /* Core2 */
		case 0x1C: /* Atom */
			ds_configure(&ds_cfg_64);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_var);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}

void ds_free(struct ds_context *context)
{
	/* This is called when the task owning the parameter context
	 * is dying. There should not be any user of that context left
	 * to disturb us anymore. */
	unsigned long leftovers = context->count;
	for (; leftovers; leftovers--)
		ds_put_context(context);
}