/*	$NetBSD: xscale_pmc.c,v 1.12 2007/10/17 19:53:45 garbled Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Allen Briggs for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xscale_pmc.c,v 1.12 2007/10/17 19:53:45 garbled Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <sys/types.h>

#include <machine/pmc.h>

#include <arm/xscale/xscalereg.h>
#include <arm/xscale/xscalevar.h>

extern int profsrc;
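
/*
 * The XScale performance monitoring unit is programmed through
 * coprocessor 14: c0 is the control register (PMNC), c1 the clock
 * counter (CCNT), and c2/c3 the two event counters (PMN0/PMN1).
 * The hardware counters are 32 bits wide; the overflow interrupt is
 * used below to extend them to the 64-bit values kept in
 * struct xscale_pmc_state.
 */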
struct xscale_pmc_state {
	uint32_t pmnc;		/* performance monitor ctrl */
	uint32_t pmcr[3];	/* array of counter reset values */
	uint64_t pmcv[3];	/* array of counter values */
	uint64_t pmc_accv[3];	/* accumulated ctr values of children */
};

#define	__PMC_CCNT_I	(0)
#define	__PMC0_I	(1)
#define	__PMC1_I	(2)
#define	__PMC_CCNT	(1 << __PMC_CCNT_I)
#define	__PMC0		(1 << __PMC0_I)
#define	__PMC1		(1 << __PMC1_I)
#define	__PMC_NCTRS	3

uint32_t pmc_kernel_mask = 0;
uint32_t pmc_kernel_bits = 0;		/* PMNC bits for kernel counters */
uint32_t pmc_kernel_enabled = 0;	/* mask of kernel-owned counters */
uint32_t pmc_profiling_enabled = 0;	/* mask of counters driving proftick */
uint32_t pmc_reset_vals[3] = {0x80000000, 0x80000000, 0x80000000};

int pmc_usecount[3] = {0, 0, 0};	/* per-counter user reference counts */
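
/*
 * Read/write the performance monitor control register (PMNC).
 */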
static inline uint32_t
xscale_pmnc_read(void)
{
	uint32_t pmnc;

	__asm volatile("mrc p14, 0, %0, c0, c0, 0"
		: "=r" (pmnc));

	return pmnc;
}

static inline void
xscale_pmnc_write(uint32_t val)
{
	__asm volatile("mcr p14, 0, %0, c0, c0, 0"
		: : "r" (val));
}
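
/*
 * Overflow interrupt handler.  For each counter whose overflow flag is
 * set, either deliver a profiling tick or credit 2^32 counts to the
 * current process, then reload the counter from its reset value.
 */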
int
xscale_pmc_dispatch(void *arg)
{
	struct clockframe *frame = arg;
	struct xscale_pmc_state *pmcs;
	struct proc *p;
	uint32_t pmnc;
	int s;

	s = splhigh();

	pmnc = xscale_pmnc_read() & ~(PMNC_C | PMNC_P);

	/*
	 * Disable interrupts -- ensure we don't reset anything.
	 */
	xscale_pmnc_write(pmnc &
	    ~(PMNC_PMN0_IF | PMNC_PMN1_IF | PMNC_CC_IF | PMNC_E));

	/*
	 * If we have recorded a clock overflow...
	 */
	if (pmnc & PMNC_CC_IF) {
		if (pmc_profiling_enabled & __PMC_CCNT) {
			proftick(frame);
			__asm volatile("mcr p14, 0, %0, c1, c0, 0"
				: : "r" (pmc_reset_vals[__PMC_CCNT_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC_CCNT) != 0) {
			/*
			 * XXX - It's not quite clear that this is the proper
			 * way to handle this case (here or in the other
			 * counters below).
			 */
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC_CCNT_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c1, c0, 0"
				: : "r" (pmcs->pmcr[__PMC_CCNT_I]));
		}
	}

	/*
	 * If we have recorded a PMC0 overflow...
	 */
	if (pmnc & PMNC_PMN0_IF) {
		if (pmc_profiling_enabled & __PMC0) {
			proftick(frame);
			__asm volatile("mcr p14, 0, %0, c2, c0, 0"
				: : "r" (pmc_reset_vals[__PMC0_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC0) != 0) {
			/*
			 * XXX - should handle wrapping the counter.
			 */
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC0_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c2, c0, 0"
				: : "r" (pmcs->pmcr[__PMC0_I]));
		}
	}

	/*
	 * If we have recorded a PMC1 overflow...
	 */
	if (pmnc & PMNC_PMN1_IF) {
		if (pmc_profiling_enabled & __PMC1) {
			proftick(frame);
			__asm volatile("mcr p14, 0, %0, c3, c0, 0"
				: : "r" (pmc_reset_vals[__PMC1_I]));
		} else if ((p = curproc) != NULL &&
		    (p->p_md.pmc_enabled & __PMC1) != 0) {
			pmcs = p->p_md.pmc_state;
			pmcs->pmcv[__PMC1_I] += 0x100000000ULL;
			__asm volatile("mcr p14, 0, %0, c3, c0, 0"
				: : "r" (pmcs->pmcr[__PMC1_I]));
		}
	}

	/*
	 * If any overflows were set, this will clear them.
	 * It will also re-enable the counters.
	 */
	xscale_pmnc_write(pmnc);

	splx(s);

	return 1;
}
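
/*
 * Fork hook: the child inherits the parent's counter configuration,
 * but its counts start at zero.
 */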
static void
xscale_fork(struct proc *p1, struct proc *p2)
{
	struct xscale_pmc_state *pmcs_p1, *pmcs_p2;

	p2->p_md.pmc_enabled = p1->p_md.pmc_enabled;
	p2->p_md.pmc_state = malloc(sizeof(struct xscale_pmc_state),
	    M_TEMP, M_NOWAIT);
	if (p2->p_md.pmc_state == NULL) {
		/* XXX
		 * Can't return failure at this point, so just disable
		 * PMC for new process.
		 */
		p2->p_md.pmc_enabled = 0;
		return;
	}
	pmcs_p1 = p1->p_md.pmc_state;
	pmcs_p2 = p2->p_md.pmc_state;
	pmcs_p2->pmnc = pmcs_p1->pmnc;
	pmcs_p2->pmcv[0] = 0;
	pmcs_p2->pmcv[1] = 0;
	pmcs_p2->pmcv[2] = 0;
	pmcs_p2->pmc_accv[0] = 0;
	pmcs_p2->pmc_accv[1] = 0;
	pmcs_p2->pmc_accv[2] = 0;
	if (p2->p_md.pmc_enabled & __PMC_CCNT)
		pmc_usecount[__PMC_CCNT_I]++;
	if (p2->p_md.pmc_enabled & __PMC0)
		pmc_usecount[__PMC0_I]++;
	if (p2->p_md.pmc_enabled & __PMC1)
		pmc_usecount[__PMC1_I]++;
}

static int
xscale_num_counters(void)
{
	return __PMC_NCTRS;
}
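
/*
 * Map a counter index to its PMC type; ctr == -1 requests the class
 * of the whole unit.
 */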
static int
xscale_counter_type(int ctr)
{
	int ret;

	switch (ctr) {
	case __PMC_CCNT_I:
		ret = PMC_TYPE_I80200_CCNT;
		break;
	case __PMC0_I:
	case __PMC1_I:
		ret = PMC_TYPE_I80200_PMCx;
		break;
	case -1:
		ret = PMC_CLASS_I80200;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
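
/*
 * Context-switch hooks: save the hardware counters into the outgoing
 * process's state, then reload them for the incoming one.  Any
 * kernel-owned counter bits are kept enabled across the switch.
 */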
static void
xscale_save_context(struct proc *p)
{
	struct xscale_pmc_state *pmcs;
	uint32_t pmnc, val;

	if (p && p->p_md.pmc_state) {
		pmcs = (struct xscale_pmc_state *) p->p_md.pmc_state;

		/* disable counters */
		pmnc = xscale_pmnc_read() & ~PMNC_E;
		xscale_pmnc_write(pmnc);

		/* do not save pmnc */

		if (p->p_md.pmc_enabled & __PMC_CCNT) {
			/* save ccnt */
			__asm volatile("mrc p14, 0, %0, c1, c0, 0"
				: "=r" (val));
			pmcs->pmcv[__PMC_CCNT_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC_CCNT_I] |= val;
		}

		if (p->p_md.pmc_enabled & __PMC0) {
			/* save pmc0 */
			__asm volatile("mrc p14, 0, %0, c2, c0, 0"
				: "=r" (val));
			pmcs->pmcv[__PMC0_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC0_I] |= val;
		}

		if (p->p_md.pmc_enabled & __PMC1) {
			/* save pmc1 */
			__asm volatile("mrc p14, 0, %0, c3, c0, 0"
				: "=r" (val));
			pmcs->pmcv[__PMC1_I] &= ~0xffffffffULL;
			pmcs->pmcv[__PMC1_I] |= val;
		}

		if (pmc_kernel_bits) {
			__asm volatile("mcr p14, 0, %0, c0, c0, 0"
				: : "r" (pmc_kernel_bits | PMNC_E));
		}
	}
}

static void
xscale_restore_context(struct proc *p)
{
	struct xscale_pmc_state *pmcs;
	register_t r = 0;
	uint32_t val;

	if (p && p->p_md.pmc_state) {
		pmcs = (struct xscale_pmc_state *) p->p_md.pmc_state;

		if (p->p_md.pmc_enabled & __PMC1) {
			/* restore pmc1 */
			val = pmcs->pmcv[__PMC1_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
				"r" (val));
		}

		if (p->p_md.pmc_enabled & __PMC0) {
			/* restore pmc0 */
			val = pmcs->pmcv[__PMC0_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
				"r" (val));
		}

		if (p->p_md.pmc_enabled & __PMC_CCNT) {
			/* restore ccnt */
			val = pmcs->pmcv[__PMC_CCNT_I] & 0xffffffffULL;
			__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
				"r" (val));
		}

		if (p->p_md.pmc_enabled)
			r = pmcs->pmnc;
	}

	if (r | pmc_kernel_bits) {
		/* restore pmnc & enable counters */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			: : "r" (r | pmc_kernel_bits | PMNC_E));
	}
}
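
/*
 * Fold an exiting child's counter values into the parent's
 * accumulators, for PMC_VALUE_FLAGS_CHILDREN queries.
 */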
static void
xscale_accumulate(struct proc *parent, struct proc *child)
{
	struct xscale_pmc_state *pmcs_parent, *pmcs_child;

	pmcs_parent = parent->p_md.pmc_state;
	pmcs_child = child->p_md.pmc_state;
	if (pmcs_parent && pmcs_child) {
		pmcs_parent->pmc_accv[__PMC_CCNT_I] +=
		    pmcs_child->pmcv[__PMC_CCNT_I];
		pmcs_parent->pmc_accv[__PMC0_I] += pmcs_child->pmcv[__PMC0_I];
		pmcs_parent->pmc_accv[__PMC1_I] += pmcs_child->pmcv[__PMC1_I];
	}
}
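
/*
 * Exit hook: drop the use counts for any counters the process held
 * and free its saved state.
 */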
static void
xscale_process_exit(struct proc *p)
{
	int i;

	if (!p)
		return;

	for (i = 0; i < __PMC_NCTRS; i++) {
		if (p->p_md.pmc_enabled & (1 << i)) {
			pmc_usecount[i]--;
			p->p_md.pmc_enabled &= ~(1 << i);
		}
	}

	if (p->p_md.pmc_state)
		free(p->p_md.pmc_state, M_TEMP);
	p->p_md.pmc_state = NULL;
	p->p_md.pmc_enabled = 0;
}
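
/*
 * Enable/disable a counter for a process, maintaining the global use
 * counts.  The context is cycled through memory so the change takes
 * effect immediately for curproc.
 */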
static void
xscale_enable_counter(struct proc *p, int ctr)
{
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return;

	if (current)
		pmc_save_context(p);

	if ((p->p_md.pmc_enabled & (1 << ctr)) == 0) {
		pmc_usecount[ctr]++;
		p->p_md.pmc_enabled |= (1 << ctr);
	}

	if (current)
		pmc_restore_context(p);
}

static void
xscale_disable_counter(struct proc *p, int ctr)
{
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return;

	if (current)
		pmc_save_context(p);

	if (p->p_md.pmc_enabled & (1 << ctr)) {
		pmc_usecount[ctr]--;
		p->p_md.pmc_enabled &= ~(1 << ctr);
	}

	if (current)
		pmc_restore_context(p);
}

static int
xscale_counter_isrunning(struct proc *p, int ctr)
{

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return -1;

	return ((pmc_kernel_enabled | p->p_md.pmc_enabled) & (1 << ctr));
}

static int
xscale_counter_isconfigured(struct proc *p, int ctr)
{

	return ((ctr >= 0) && (ctr < __PMC_NCTRS));
}
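
/*
 * Configure a counter for a process.  Event ids 0x00-0x16 (with 0x0e,
 * 0x0f and 0x13 rejected) select PMN0/PMN1 events; the pseudo-ids
 * 0x100 and 0x101 select CCNT at the full or 1/64 clock rate
 * respectively.
 */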
static int
xscale_configure_counter(struct proc *p, int ctr, struct pmc_counter_cfg *cfg)
{
	struct xscale_pmc_state *pmcs;
	int current = (p == curproc);

	if (ctr < 0 || ctr >= __PMC_NCTRS || !p)
		return EINVAL;

	if (pmc_kernel_enabled & (1 << ctr))
		return EBUSY;

	if (ctr) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return ENODEV;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return ENODEV;
	}

	if (current)
		pmc_save_context(p);

	if (p->p_md.pmc_state == NULL) {
		p->p_md.pmc_state = malloc(sizeof(struct xscale_pmc_state),
		    M_TEMP, M_WAITOK);
		if (!p->p_md.pmc_state)
			return ENOMEM;
	}
	pmcs = p->p_md.pmc_state;

	switch (ctr) {
	case __PMC_CCNT_I:
		pmcs->pmnc &= ~PMNC_D;
		pmcs->pmnc |= (PMNC_CC_IF | PMNC_CC_IE);
		if (cfg->event_id == 0x101)
			pmcs->pmnc |= PMNC_D;
		break;
	case __PMC0_I:
		pmcs->pmnc &= ~PMNC_EVCNT0_MASK;
		pmcs->pmnc |= (cfg->event_id << PMNC_EVCNT0_SHIFT)
		    | (PMNC_PMN0_IF | PMNC_PMN0_IE);
		break;
	case __PMC1_I:
		pmcs->pmnc &= ~PMNC_EVCNT1_MASK;
		pmcs->pmnc |= (cfg->event_id << PMNC_EVCNT1_SHIFT)
		    | (PMNC_PMN1_IF | PMNC_PMN1_IE);
		break;
	}

	/* Counters count up; preload so they overflow after reset_value. */
	pmcs->pmcr[ctr] = (uint32_t) -((int32_t) cfg->reset_value);
	pmcs->pmcv[ctr] = pmcs->pmcr[ctr];

	if (current)
		pmc_restore_context(p);

	return 0;
}
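
/*
 * Read a counter: the accumulated child values, the saved value of a
 * process that isn't running, or (for curproc) the live hardware
 * count combined with the saved state.
 */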
static int
xscale_get_counter_value(struct proc *p, int ctr, int flags, uint64_t *pval)
{
	struct xscale_pmc_state *pmcs;
	uint32_t val;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (p) {
		pmcs = p->p_md.pmc_state;

		if (flags & PMC_VALUE_FLAGS_CHILDREN) {
			*pval = pmcs->pmc_accv[ctr];
			return 0;
		}
		if (p != curproc) {
			*pval = pmcs->pmcv[ctr];
			return 0;
		}
	}

	switch (ctr) {
	case __PMC_CCNT_I:
		__asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case __PMC0_I:
		__asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case __PMC1_I:
		__asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	default:
		val = 0;
		break;
	}

	*pval = val;
	if (p) {
		pmcs = p->p_md.pmc_state;
		*pval += pmcs->pmcv[ctr];
	}

	return 0;
}
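
/*
 * Start/stop profiling on a counter: the counter's overflow interrupt
 * then drives proftick() instead of being credited to the current
 * process, and bit 0 of profsrc is set while any counter is profiling.
 */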
static int
xscale_start_profiling(int ctr, struct pmc_counter_cfg *cfg)
{
	struct proc *p = curproc;
	int s;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if ((pmc_usecount[ctr] > 0) || (pmc_kernel_enabled & (1 << ctr)))
		return EBUSY;

	if (ctr) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return ENODEV;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return ENODEV;
	}

	pmc_save_context(p);

	pmc_reset_vals[ctr] = (uint32_t) -((int32_t) cfg->reset_value);

	s = splhigh();

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= ~PMNC_D;
		pmc_kernel_bits |= PMNC_CC_IE;
		/* event 0x101 selects the 1/64 clock rate */
		if (cfg->event_id == 0x101)
			pmc_kernel_bits |= PMNC_D;
		__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
			"r" (pmc_reset_vals[__PMC_CCNT_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
			"r" (PMNC_CC_IF));
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~PMNC_EVCNT0_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT0_SHIFT)
		    | PMNC_PMN0_IE;
		__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
			"r" (pmc_reset_vals[__PMC0_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
			"r" (PMNC_PMN0_IF));
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~PMNC_EVCNT1_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT1_SHIFT)
		    | PMNC_PMN1_IE;
		__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
			"r" (pmc_reset_vals[__PMC1_I]));
		__asm volatile("mcr p14, 0, %0, c0, c0, 0" : :
			"r" (PMNC_PMN1_IF));
		break;
	}

	profsrc |= 1;
	pmc_kernel_enabled |= (1 << ctr);
	pmc_profiling_enabled |= (1 << ctr);

	pmc_restore_context(p);

	splx(s);

	return 0;
}

static int
xscale_stop_profiling(int ctr)
{
	struct proc *p = curproc;
	uint32_t save;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (!(pmc_kernel_enabled & (1 << ctr)))
		return 0;

	save = pmc_kernel_bits;
	pmc_kernel_bits = 0;
	pmc_save_context(p);
	pmc_kernel_bits = save;

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= ~(PMNC_D | PMNC_CC_IE);
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~(PMNC_EVCNT0_MASK | PMNC_PMN0_IE);
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~(PMNC_EVCNT1_MASK | PMNC_PMN1_IE);
		break;
	}

	pmc_kernel_enabled &= ~(1 << ctr);
	pmc_profiling_enabled &= ~(1 << ctr);

	if (pmc_profiling_enabled == 0)
		profsrc &= ~1;

	pmc_restore_context(p);

	return 0;
}
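
/*
 * Claim/release a counter for exclusive kernel use.  Unlike the
 * profiling path, no overflow interrupt is enabled; the counter just
 * free-runs from its reset value.
 */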
static int
xscale_alloc_kernel_counter(int ctr, struct pmc_counter_cfg *cfg)
{
	struct proc *p = curproc;

	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if ((pmc_usecount[ctr] > 0) || (pmc_kernel_enabled & (1 << ctr)))
		return EBUSY;

	if (ctr) {
		if ((cfg->event_id > 0x16) || ((cfg->event_id & 0xe) == 0xe)
		    || (cfg->event_id == 0x13))
			return ENODEV;
	} else {
		if (cfg->event_id != 0x100 && cfg->event_id != 0x101)
			return ENODEV;
	}

	pmc_save_context(p);

	pmc_reset_vals[ctr] = (uint32_t) -((int32_t) cfg->reset_value);

	switch (ctr) {
	case __PMC_CCNT_I:
		pmc_kernel_bits &= ~PMNC_D;
		/* event 0x101 selects the 1/64 clock rate */
		if (cfg->event_id == 0x101)
			pmc_kernel_bits |= PMNC_D;
		__asm volatile("mcr p14, 0, %0, c1, c0, 0" : :
			"r" (pmc_reset_vals[__PMC_CCNT_I]));
		break;
	case __PMC0_I:
		pmc_kernel_bits &= ~PMNC_EVCNT0_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT0_SHIFT);
		__asm volatile("mcr p14, 0, %0, c2, c0, 0" : :
			"r" (pmc_reset_vals[__PMC0_I]));
		break;
	case __PMC1_I:
		pmc_kernel_bits &= ~PMNC_EVCNT1_MASK;
		pmc_kernel_bits |= (cfg->event_id << PMNC_EVCNT1_SHIFT);
		__asm volatile("mcr p14, 0, %0, c3, c0, 0" : :
			"r" (pmc_reset_vals[__PMC1_I]));
		break;
	}

	pmc_kernel_enabled |= (1 << ctr);

	pmc_restore_context(p);

	return 0;
}

static int
xscale_free_kernel_counter(int ctr)
{
	if (ctr < 0 || ctr >= __PMC_NCTRS)
		return EINVAL;

	if (!(pmc_kernel_enabled & (1 << ctr)))
		return 0;

	pmc_kernel_enabled &= ~(1 << ctr);

	return 0;
}
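
/*
 * Handler table consumed through the arm_pmc hook pointer set in
 * xscale_pmu_init() below; the entries follow the field order of
 * struct arm_pmc_funcs.
 */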
struct arm_pmc_funcs xscale_pmc_funcs = {
	xscale_fork,
	xscale_num_counters,
	xscale_counter_type,
	xscale_save_context,
	xscale_restore_context,
	xscale_enable_counter,
	xscale_disable_counter,
	xscale_accumulate,
	xscale_process_exit,
	xscale_configure_counter,
	xscale_get_counter_value,
	xscale_counter_isconfigured,
	xscale_counter_isrunning,
	xscale_start_profiling,
	xscale_stop_profiling,
	xscale_alloc_kernel_counter,
	xscale_free_kernel_counter
};
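
/*
 * Make the XScale PMU the active PMC backend by publishing the
 * handler table.
 */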
void
xscale_pmu_init(void)
{
	arm_pmc = &xscale_pmc_funcs;
}