[PATCH] briq_panel: read() and write() get __user pointers, damnit
[linux-2.6/verdex.git] / arch / powerpc / kernel / pmc.c
bloba0a2efadeabf8fbbed432e506360d14f85c1770f
/*
 * arch/powerpc/kernel/pmc.c
 *
 * Copyright (C) 2004 David Gibson, IBM Corporation.
 * Includes code formerly from arch/ppc/kernel/perfmon.c:
 *   Author: Andy Fleming
 *   Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
15 #include <linux/errno.h>
16 #include <linux/spinlock.h>
17 #include <linux/module.h>
19 #include <asm/processor.h>
20 #include <asm/pmc.h>
22 #if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
23 static void dummy_perf(struct pt_regs *regs)
25 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
27 pmgc0 &= ~PMGC0_PMIE;
28 mtpmr(PMRN_PMGC0, pmgc0);
30 #elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32 #ifndef MMCR0_PMAO
33 #define MMCR0_PMAO 0
34 #endif
36 /* Ensure exceptions are disabled */
37 static void dummy_perf(struct pt_regs *regs)
39 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
41 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
42 mtspr(SPRN_MMCR0, mmcr0);
44 #else
45 /* Ensure exceptions are disabled */
46 static void dummy_perf(struct pt_regs *regs)
48 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
50 mmcr0 &= ~(MMCR0_PMXE);
51 mtspr(SPRN_MMCR0, mmcr0);
53 #endif
55 static DEFINE_SPINLOCK(pmc_owner_lock);
56 static void *pmc_owner_caller; /* mostly for debugging */
57 perf_irq_t perf_irq = dummy_perf;
59 int reserve_pmc_hardware(perf_irq_t new_perf_irq)
61 int err = 0;
63 spin_lock(&pmc_owner_lock);
65 if (pmc_owner_caller) {
66 printk(KERN_WARNING "reserve_pmc_hardware: "
67 "PMC hardware busy (reserved by caller %p)\n",
68 pmc_owner_caller);
69 err = -EBUSY;
70 goto out;
73 pmc_owner_caller = __builtin_return_address(0);
74 perf_irq = new_perf_irq ? : dummy_perf;
76 out:
77 spin_unlock(&pmc_owner_lock);
78 return err;
80 EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
82 void release_pmc_hardware(void)
84 spin_lock(&pmc_owner_lock);
86 WARN_ON(! pmc_owner_caller);
88 pmc_owner_caller = NULL;
89 perf_irq = dummy_perf;
91 spin_unlock(&pmc_owner_lock);
93 EXPORT_SYMBOL_GPL(release_pmc_hardware);
#ifdef CONFIG_PPC64
/**
 * power4_enable_pmcs - enable the performance monitor counters on POWER4
 *
 * Sets the PMC-enable bit in HID0 using the sync / mtspr / 6x mfspr /
 * isync sequence that the POWER4 user's manual mandates for HID0
 * updates.
 */
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	hid0 |= 1UL << (63 - 20);	/* HID0[20] = PMC enable */

	/* POWER4 requires the following sequence */
	asm volatile(
		"sync\n"
		"mtspr %1, %0\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
		"memory");
}
#endif /* CONFIG_PPC64 */