/*	$NetBSD: xscale_pmc.c,v 1.12 2007/10/17 19:53:45 garbled Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Allen Briggs for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xscale_pmc.c,v 1.12 2007/10/17 19:53:45 garbled Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>		/* struct proc, curproc */
#include <sys/systm.h>
#include <sys/types.h>

#include <machine/pmc.h>

#include <arm/xscale/xscalereg.h>
#include <arm/xscale/xscalevar.h>
struct xscale_pmc_state {
	uint32_t pmnc;		/* performance monitor ctrl */
	uint32_t pmcr[3];	/* array of counter reset values */
	uint64_t pmcv[3];	/* array of counter values */
	uint64_t pmc_accv[3];	/* accumulated ctr values of children */
};
#define	__PMC_CCNT_I	(0)
#define	__PMC0_I	(1)
#define	__PMC1_I	(2)
#define	__PMC_NCTRS	(3)

#define	__PMC_CCNT	(1 << __PMC_CCNT_I)
#define	__PMC0		(1 << __PMC0_I)
#define	__PMC1		(1 << __PMC1_I)
uint32_t pmc_kernel_mask = 0;
uint32_t pmc_kernel_bits = 0;
uint32_t pmc_kernel_enabled = 0;
uint32_t pmc_profiling_enabled = 0;
uint32_t pmc_reset_vals[3] = {0x80000000, 0x80000000, 0x80000000};

int pmc_usecount[3] = {0, 0, 0};
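/*
 * The XScale performance monitor control register (PMNC) lives in
 * coprocessor 14, register c0.  The accessors below read and write it
 * with single mrc/mcr instructions.
 */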
static inline uint32_t
xscale_pmnc_read(void)
{
	uint32_t pmnc;

	__asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (pmnc));

	return pmnc;
}
static inline void
xscale_pmnc_write(uint32_t val)
{
	__asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
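/*
 * Overflow interrupt handler.  The three 32-bit hardware counters (CCNT,
 * PMC0, PMC1) are extended to 64 bits in software: each overflow adds
 * 2^32 to the owning process's soft counter, or reloads the global reset
 * value when the counter is being used for kernel profiling.  The
 * clockframe argument carries the interrupted context.
 */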
void
xscale_pmc_dispatch(void *arg)
{
	struct clockframe *frame = arg;
	struct xscale_pmc_state *pmcs;
	struct proc *p;
	uint32_t pmnc;

	/* Read PMNC, ignoring the counter-reset bits. */
	pmnc = xscale_pmnc_read() & ~(PMNC_C | PMNC_P);

	/*
	 * Disable interrupts -- ensure we don't reset anything.
	 */
	xscale_pmnc_write(pmnc &
	    ~(PMNC_PMN0_IF | PMNC_PMN1_IF | PMNC_CC_IF | PMNC_E));

	/*
	 * If we have recorded a clock overflow...
	 */
	if (pmnc & PMNC_CC_IF) {
		if (pmc_profiling_enabled & __PMC_CCNT) {
			__asm volatile("mcr p14, 0, %0, c1, c0, 0"
			    : : "r" (pmc_reset_vals[__PMC_CCNT_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC_CCNT) != 0) {
			/*
			 * XXX - It's not quite clear that this is the proper
			 * way to handle this case (here or in the other
			 * counters below).
			 */
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC_CCNT_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c1, c0, 0"
			    : : "r" (pmcs->pmcr[__PMC_CCNT_I]));
		}
	}

	/*
	 * If we have recorded a PMC0 overflow...
	 */
	if (pmnc & PMNC_PMN0_IF) {
		if (pmc_profiling_enabled & __PMC0) {
			__asm volatile("mcr p14, 0, %0, c2, c0, 0"
			    : : "r" (pmc_reset_vals[__PMC0_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC0) != 0) {
			/*
			 * XXX - should handle wrapping the counter.
			 */
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC0_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c2, c0, 0"
			    : : "r" (pmcs->pmcr[__PMC0_I]));
		}
	}

	/*
	 * If we have recorded a PMC1 overflow...
	 */
	if (pmnc & PMNC_PMN1_IF) {
		if (pmc_profiling_enabled & __PMC1) {
			__asm volatile("mcr p14, 0, %0, c3, c0, 0"
			    : : "r" (pmc_reset_vals[__PMC1_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC1) != 0) {
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC1_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c3, c0, 0"
			    : : "r" (pmcs->pmcr[__PMC1_I]));
		}
	}

	/*
	 * If any overflows were set, this will clear them.
	 * It will also re-enable the counters.
	 */
	xscale_pmnc_write(pmnc);

	(void)frame;
}
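/*
 * Fork hook: the child inherits the parent's counter configuration but
 * starts with zeroed counter values and child-accumulation slots.  Each
 * inherited counter bumps the global use count.
 */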
static void
xscale_fork(struct proc *p1, struct proc *p2)
{
	struct xscale_pmc_state *pmcs_p1, *pmcs_p2;

	p2->p_md.pmc_enabled = p1->p_md.pmc_enabled;
	p2->p_md.pmc_state = malloc(sizeof(struct xscale_pmc_state),
	    M_TEMP, M_NOWAIT);
	if (p2->p_md.pmc_state == NULL) {
		/*
		 * Can't return failure at this point, so just disable
		 * PMC for new process.
		 */
		p2->p_md.pmc_enabled = 0;
		return;
	}
	pmcs_p1 = p1->p_md.pmc_state;
	pmcs_p2 = p2->p_md.pmc_state;
	pmcs_p2->pmnc = pmcs_p1->pmnc;
	pmcs_p2->pmcv[0] = 0;
	pmcs_p2->pmcv[1] = 0;
	pmcs_p2->pmcv[2] = 0;
	pmcs_p2->pmc_accv[0] = 0;
	pmcs_p2->pmc_accv[1] = 0;
	pmcs_p2->pmc_accv[2] = 0;
	if (p2->p_md.pmc_enabled & __PMC_CCNT)
		pmc_usecount[__PMC_CCNT_I]++;
	if (p2->p_md.pmc_enabled & __PMC0)
		pmc_usecount[__PMC0_I]++;
	if (p2->p_md.pmc_enabled & __PMC1)
		pmc_usecount[__PMC1_I]++;
}
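/*
 * Simple capability queries: the XScale PMU provides three counters
 * (CCNT plus two event counters), reported with the i80200 PMC type
 * constants.
 */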
static int
xscale_num_counters(void)
{
	return __PMC_NCTRS;
}
static int
xscale_counter_type(int ctr)
{
	int ret;

	switch (ctr) {
	case __PMC_CCNT_I:
		ret = PMC_TYPE_I80200_CCNT;
		break;
	case __PMC0_I:
	case __PMC1_I:
		ret = PMC_TYPE_I80200_PMCx;
		break;
	default:
		/* ctr == -1 reports the class of the counter set. */
		ret = PMC_CLASS_I80200;
		break;
	}

	return ret;
}
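/*
 * Context-switch hook: stop the counters, fold the live 32-bit hardware
 * values into the low half of the outgoing process's 64-bit soft
 * counters, and, if the kernel has counters of its own, restart the PMU
 * with just the kernel configuration.
 */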
static void
xscale_save_context(struct proc *p)
{
	struct xscale_pmc_state *pmcs;
	uint32_t pmnc, val;

	if (p && p->p_md.pmc_state) {
		pmcs = (struct xscale_pmc_state *) p->p_md.pmc_state;

		/* disable counters */
		pmnc = xscale_pmnc_read() & ~PMNC_E;
		xscale_pmnc_write(pmnc);

		/* do not save pmnc */

		if (p->p_md.pmc_enabled & __PMC_CCNT) {
			__asm volatile("mrc p14, 0, %0, c1, c0, 0"
			    : "=r" (val));
			pmcs->pmcv[__PMC_CCNT_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC_CCNT_I] |= val;
		}

		if (p->p_md.pmc_enabled & __PMC0) {
			__asm volatile("mrc p14, 0, %0, c2, c0, 0"
			    : "=r" (val));
			pmcs->pmcv[__PMC0_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC0_I] |= val;
		}

		if (p->p_md.pmc_enabled & __PMC1) {
			__asm volatile("mrc p14, 0, %0, c3, c0, 0"
			    : "=r" (val));
			pmcs->pmcv[__PMC1_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC1_I] |= val;
		}

		if (pmc_kernel_bits) {
			__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			    : : "r" (pmc_kernel_bits | PMNC_E));
		}
	}
}
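/*
 * Context-switch hook: reload the hardware counters from the incoming
 * process's soft counters (in the reverse order of the save), then
 * rewrite PMNC with the process bits, any kernel bits, and the enable
 * bit.
 */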
static void
xscale_restore_context(struct proc *p)
{
	struct xscale_pmc_state *pmcs;
	uint32_t val, r = 0;

	if (p && p->p_md.pmc_state) {
		pmcs = (struct xscale_pmc_state *) p->p_md.pmc_state;

		if (p->p_md.pmc_enabled & __PMC1) {
			val = pmcs->pmcv[__PMC1_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
			    "r" (val));
		}

		if (p->p_md.pmc_enabled & __PMC0) {
			val = pmcs->pmcv[__PMC0_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
			    "r" (val));
		}

		if (p->p_md.pmc_enabled & __PMC_CCNT) {
			val = pmcs->pmcv[__PMC_CCNT_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
			    "r" (val));
		}

		if (p->p_md.pmc_enabled)
			r = pmcs->pmnc;
	}

	if (r | pmc_kernel_bits) {
		/* restore pmnc & enable counters */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
		    : : "r" (r | pmc_kernel_bits | PMNC_E));
	}
}
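/*
 * Exit-time accounting: add an exiting child's counter totals into the
 * parent's pmc_accv[] slots so they can later be reported with
 * PMC_VALUE_FLAGS_CHILDREN.
 */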
static void
xscale_accumulate(struct proc *parent, struct proc *child)
{
	struct xscale_pmc_state *pmcs_parent, *pmcs_child;

	pmcs_parent = parent->p_md.pmc_state;
	pmcs_child = child->p_md.pmc_state;
	if (pmcs_parent && pmcs_child) {
		pmcs_parent->pmc_accv[__PMC_CCNT_I] +=
		    pmcs_child->pmcv[__PMC_CCNT_I];
		pmcs_parent->pmc_accv[__PMC0_I] += pmcs_child->pmcv[__PMC0_I];
		pmcs_parent->pmc_accv[__PMC1_I] += pmcs_child->pmcv[__PMC1_I];
	}
}
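/*
 * Process teardown: drop the use count for every counter the process
 * had enabled and release its PMC state.
 */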
static void
xscale_process_exit(struct proc *p)
{
	int i;

	for (i = 0; i < __PMC_NCTRS; i++) {
		if (p->p_md.pmc_enabled & (1 << i)) {
			pmc_usecount[i]--;
			p->p_md.pmc_enabled &= ~(1 << i);
		}
	}
	if (p->p_md.pmc_state)
		free(p->p_md.pmc_state, M_TEMP);
	p->p_md.pmc_state = NULL;
	p->p_md.pmc_enabled = 0;
}
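/*
 * Per-process enable/disable.  Both update the enable mask and the use
 * count, and reload the hardware immediately when the target process is
 * the one currently running.
 */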
static int
xscale_enable_counter(struct proc *p, int ctr)
{
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return EINVAL;

	if ((p->p_md.pmc_enabled & (1 << ctr)) == 0) {
		pmc_usecount[ctr]++;
		p->p_md.pmc_enabled |= (1 << ctr);
	}

	if (current)
		pmc_restore_context(p);

	return 0;
}
static int
xscale_disable_counter(struct proc *p, int ctr)
{
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return EINVAL;

	if (p->p_md.pmc_enabled & (1 << ctr)) {
		pmc_usecount[ctr]--;
		p->p_md.pmc_enabled &= ~(1 << ctr);
	}

	if (current)
		pmc_restore_context(p);

	return 0;
}
static int
xscale_counter_isrunning(struct proc *p, int ctr)
{
	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return 0;

	return ((pmc_kernel_enabled | p->p_md.pmc_enabled) & (1 << ctr));
}
static int
xscale_counter_isconfigured(struct proc *p, int ctr)
{
	return ((ctr >= 0) && (ctr < __PMC_NCTRS));
}
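/*
 * Configure a per-process counter.  Event IDs 0x00-0x16 (minus the
 * reserved encodings) select PMC0/PMC1 events; the pseudo-events 0x100
 * and 0x101 select the clock counter, the latter with the divide-by-64
 * bit (PMNC_D) set.  The reset value is stored negated: preloading the
 * 32-bit counter with -(reset_value) makes it overflow, and thus
 * interrupt, after exactly reset_value events (e.g. reset_value 10000
 * yields a preload of 0xffffd8f0).
 */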
static int
xscale_configure_counter(struct proc *p, int ctr, struct pmc_counter_cfg *cfg)
{
	struct xscale_pmc_state *pmcs;
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return EINVAL;

	if (pmc_kernel_enabled & (1 << ctr))
		return EBUSY;

	if (ctr != __PMC_CCNT_I) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return EINVAL;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return EINVAL;
	}

	if (p->p_md.pmc_state == NULL) {
		p->p_md.pmc_state = malloc(sizeof(struct xscale_pmc_state),
		    M_TEMP, M_NOWAIT);
		if (!p->p_md.pmc_state)
			return ENOMEM;
	}
	pmcs = p->p_md.pmc_state;

	switch (ctr) {
	case __PMC_CCNT_I:
		pmcs->pmnc &= ~PMNC_D;
		pmcs->pmnc |= (PMNC_CC_IF | PMNC_CC_IE);
		if (cfg->event_id == 0x101)
			pmcs->pmnc |= PMNC_D;
		break;
	case __PMC0_I:
		pmcs->pmnc &= ~PMNC_EVCNT0_MASK;
		pmcs->pmnc |= (cfg->event_id << PMNC_EVCNT0_SHIFT)
		    | (PMNC_PMN0_IF | PMNC_PMN0_IE);
		break;
	case __PMC1_I:
		pmcs->pmnc &= ~PMNC_EVCNT1_MASK;
		pmcs->pmnc |= (cfg->event_id << PMNC_EVCNT1_SHIFT)
		    | (PMNC_PMN1_IF | PMNC_PMN1_IE);
		break;
	}

	pmcs->pmcr[ctr] = (uint32_t) -((int32_t) cfg->reset_value);
	pmcs->pmcv[ctr] = pmcs->pmcr[ctr];

	if (current)
		pmc_restore_context(p);

	return 0;
}
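/*
 * Fetch a counter value.  For a process that is not running, the saved
 * soft counter (or the children's accumulated total) is returned; for
 * the current process the live hardware counter is read as well.
 */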
static int
xscale_get_counter_value(struct proc *p, int ctr, int flags, uint64_t *pval)
{
	struct xscale_pmc_state *pmcs;
	uint32_t val;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (p != curproc) {
		pmcs = p->p_md.pmc_state;

		if (flags & PMC_VALUE_FLAGS_CHILDREN) {
			*pval = pmcs->pmc_accv[ctr];
			return 0;
		}

		*pval = pmcs->pmcv[ctr];
		return 0;
	}

	/* Current process: read the live hardware counter. */
	switch (ctr) {
	case __PMC_CCNT_I:
		__asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case __PMC0_I:
		__asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case __PMC1_I:
		__asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	default:
		val = 0;
		break;
	}

	/* XXX assumed: combine the live count with the soft counter. */
	*pval = val;
	pmcs = p->p_md.pmc_state;
	*pval += pmcs->pmcv[ctr];

	return 0;
}
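/*
 * Kernel profiling: claim a counter system-wide, program its event and
 * reset value, and let the overflow interrupt reload it, so the counter
 * fires periodically while the kernel runs.
 */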
static int
xscale_start_profiling(int ctr, struct pmc_counter_cfg *cfg)
{
	struct proc *p = curproc;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if ((pmc_usecount[ctr] > 0) || (pmc_kernel_enabled & (1 << ctr)))
		return EBUSY;

	if (ctr != __PMC_CCNT_I) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return EINVAL;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return EINVAL;
	}

	pmc_reset_vals[ctr] = (uint32_t) -((int32_t) cfg->reset_value);

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= PMNC_D;
		pmc_kernel_bits |= PMNC_CC_IE;
		if (cfg->event_id == 0x101)
			pmc_kernel_bits |= PMNC_D;
		__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC_CCNT_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
		    "r" (pmc_kernel_bits | PMNC_E));
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~PMNC_EVCNT0_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT0_SHIFT)
		    | PMNC_PMN0_IE;
		__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC0_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
		    "r" (pmc_kernel_bits | PMNC_E));
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~PMNC_EVCNT1_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT1_SHIFT)
		    | PMNC_PMN1_IE;
		__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC1_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
		    "r" (pmc_kernel_bits | PMNC_E));
		break;
	}

	pmc_kernel_enabled |= (1 << ctr);
	pmc_profiling_enabled |= (1 << ctr);

	pmc_restore_context(p);

	return 0;
}
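/*
 * Stop kernel profiling on a counter: clear its event selection and
 * interrupt-enable bits from the kernel configuration and release the
 * counter.
 */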
static int
xscale_stop_profiling(int ctr)
{
	struct proc *p = curproc;
	uint32_t save;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (!(pmc_kernel_enabled & (1 << ctr)))
		return EINVAL;

	save = pmc_kernel_bits;
	/* XXX assumed: stop the counters while reconfiguring. */
	pmc_kernel_bits = 0;
	xscale_pmnc_write(0);
	pmc_kernel_bits = save;

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= ~(PMNC_D | PMNC_CC_IE);
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~(PMNC_EVCNT0_MASK | PMNC_PMN0_IE);
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~(PMNC_EVCNT1_MASK | PMNC_PMN1_IE);
		break;
	}

	pmc_kernel_enabled &= ~(1 << ctr);
	pmc_profiling_enabled &= ~(1 << ctr);

	if (pmc_profiling_enabled == 0)
		pmc_kernel_mask = 0;	/* XXX assumed */

	pmc_restore_context(p);

	return 0;
}
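/*
 * Claim a counter for the kernel without profiling: the event is
 * programmed into the kernel PMNC bits and the counter preloaded, but
 * no overflow interrupt is requested.
 */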
static int
xscale_alloc_kernel_counter(int ctr, struct pmc_counter_cfg *cfg)
{
	struct proc *p = curproc;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if ((pmc_usecount[ctr] > 0) || (pmc_kernel_enabled & (1 << ctr)))
		return EBUSY;

	if (ctr != __PMC_CCNT_I) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return EINVAL;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return EINVAL;
	}

	pmc_reset_vals[ctr] = (uint32_t) -((int32_t) cfg->reset_value);

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= PMNC_D;
		if (cfg->event_id == 0x101)
			pmc_kernel_bits |= PMNC_D;
		__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC_CCNT_I]));
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~PMNC_EVCNT0_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT0_SHIFT);
		__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC0_I]));
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~PMNC_EVCNT1_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT1_SHIFT);
		__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
		    "r" (pmc_reset_vals[__PMC1_I]));
		break;
	}

	pmc_kernel_enabled |= (1 << ctr);

	pmc_restore_context(p);

	return 0;
}
static int
xscale_free_kernel_counter(int ctr)
{
	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (!(pmc_kernel_enabled & (1 << ctr)))
		return EINVAL;

	pmc_kernel_enabled &= ~(1 << ctr);

	return 0;
}
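/*
 * Dispatch table consumed by the machine-independent pmc(9) layer;
 * xscale_pmu_init() hangs it off the global arm_pmc hook.
 */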
struct arm_pmc_funcs xscale_pmc_funcs = {
	xscale_fork,
	xscale_num_counters,
	xscale_counter_type,
	xscale_save_context,
	xscale_restore_context,
	xscale_enable_counter,
	xscale_disable_counter,
	xscale_accumulate,
	xscale_process_exit,
	xscale_configure_counter,
	xscale_get_counter_value,
	xscale_counter_isconfigured,
	xscale_counter_isrunning,
	xscale_start_profiling,
	xscale_stop_profiling,
	xscale_alloc_kernel_counter,
	xscale_free_kernel_counter,
};
void
xscale_pmu_init(void)
{
	arm_pmc = &xscale_pmc_funcs;
}