/*
 * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption,
 * however here we only expose the 4 energy consumption free running
 * counters (pp0, pkg, dram, pp1).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *        event: rapl_energy_cores
 *
 *  pkg counter: consumption of the whole processor package
 *        event: rapl_energy_pkg
 *
 * dram counter: consumption of the dram domain (servers only)
 *        event: rapl_energy_dram
 *
 *  pp1 counter: consumption of the built-in GPU domain (clients only)
 *        event: rapl_energy_gpu
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
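/*
 * Illustrative userspace sketch (not part of this driver): converting
 * the 32.32 fixed-point count read via perf into Joules and average
 * Watts. The variables raw_count and elapsed_sec are hypothetical.
 *
 *      #include <math.h>
 *      double joules = ldexp((double)raw_count, -32);
 *      double watts  = joules / elapsed_sec;
 */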
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"
/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT   0       /* all cores */
#define INTEL_RAPL_PP0          0x1     /* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT   1       /* entire package */
#define INTEL_RAPL_PKG          0x2     /* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT   2       /* DRAM */
#define INTEL_RAPL_RAM          0x3     /* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT   3       /* gpu */
#define INTEL_RAPL_PP1          0x4     /* pseudo-encoding */

#define NR_RAPL_DOMAINS         0x4
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
        "pp0-core",
        "package",
        "dram",
        "pp1-gfx",
};
/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell/Broadwell clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL    (1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)
/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK 0xFFULL
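/*
 * Example (illustrative): once this PMU is registered under the name
 * "power" (see rapl_pmu_init() below), a package energy event can be
 * counted system-wide with:
 *
 *      perf stat -a -e power/energy-pkg/ -- sleep 1
 *
 * which corresponds to attr->config = 0x02 (INTEL_RAPL_PKG).
 */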
#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)           \
static ssize_t __rapl_##_var##_show(struct kobject *kobj,       \
                                struct kobj_attribute *attr,    \
                                char *page)                     \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct kobj_attribute format_attr_##_var =               \
        __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)                                      \
static struct perf_pmu_events_attr event_attr_##v = {                           \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),     \
        .id             = 0,                                                    \
        .event_str      = str,                                                  \
};
struct rapl_pmu {
        raw_spinlock_t          lock;
        int                     n_active;
        int                     cpu;
        struct list_head        active_list;
        struct pmu              *pmu;
        ktime_t                 timer_interval;
        struct hrtimer          hrtimer;
};

struct rapl_pmus {
        struct pmu              pmu;
        unsigned int            maxpkg;
        struct rapl_pmu         *pmus[];
};
 /* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
        return rapl_pmus->pmus[topology_logical_package_id(cpu)];
}
static inline u64 rapl_read_counter(struct perf_event *event)
{
        u64 raw;

        rdmsrl(event->hw.event_base, raw);
        return raw;
}
static inline u64 rapl_scale(u64 v, int cfg)
{
        if (cfg > NR_RAPL_DOMAINS) {
                pr_warn("Invalid domain %d, failed to scale data\n", cfg);
                return v;
        }
        /*
         * scale delta to smallest unit (1/2^32)
         * users must then scale back: count * 1/(2^32) to get Joules
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
        return v << (32 - rapl_hw_unit[cfg - 1]);
}
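/*
 * Worked example (illustrative): with rapl_hw_unit[cfg - 1] == 16
 * (a hardware unit of 2^-16 Joules), a raw delta of 1 is shifted left
 * by 32 - 16 = 16 bits, i.e. 65536 in 2^-32 Joule units, which is
 * exactly 2^-16 Joules again: the scaling only changes the unit, not
 * the energy amount.
 */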
static u64 rapl_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;
        s64 delta, sdelta;
        int shift = RAPL_CNTR_WIDTH;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(event->hw.event_base, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count) {
                goto again;
        }

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the register.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        sdelta = rapl_scale(delta, event->hw.config);

        local64_add(sdelta, &event->count);

        return new_raw_count;
}
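/*
 * Illustrative note: the double shift above handles counter wrap for
 * free. With shift == 32, a 32-bit counter wrapping from 0xffffffff to
 * 0x00000001 gives (0x1ULL << 32) - (0xffffffffULL << 32), which as a
 * signed 64-bit value shifted back right by 32 is +2, the correct
 * delta across the wrap.
 */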
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
        hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
                      HRTIMER_MODE_REL_PINNED);
}
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
        struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
        struct perf_event *event;
        unsigned long flags;

        if (!pmu->n_active)
                return HRTIMER_NORESTART;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        list_for_each_entry(event, &pmu->active_list, active_entry)
                rapl_event_update(event);

        raw_spin_unlock_irqrestore(&pmu->lock, flags);

        hrtimer_forward_now(hrtimer, pmu->timer_interval);

        return HRTIMER_RESTART;
}
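/*
 * Illustrative note: this handler only refreshes the software copies of
 * the free-running MSRs while events are active; the polling interval
 * (rapl_timer_ms, computed in rapl_check_hw_unit() below) is sized so
 * each 32-bit hardware counter is read at least twice per wrap at the
 * 200 W reference load.
 */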
static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
        struct hrtimer *hr = &pmu->hrtimer;

        hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hr->function = rapl_hrtimer_handle;
}
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
                                   struct perf_event *event)
{
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;

        list_add_tail(&event->active_entry, &pmu->active_list);

        local64_set(&event->hw.prev_count, rapl_read_counter(event));

        pmu->n_active++;
        if (pmu->n_active == 1)
                rapl_start_hrtimer(pmu);
}
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);
        __rapl_pmu_event_start(pmu, event);
        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                WARN_ON_ONCE(pmu->n_active <= 0);
                pmu->n_active--;
                if (pmu->n_active == 0)
                        hrtimer_cancel(&pmu->hrtimer);

                list_del(&event->active_entry);

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        /* check if update of sw counter is necessary */
        if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                rapl_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }

        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (mode & PERF_EF_START)
                __rapl_pmu_event_start(pmu, event);

        raw_spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
        rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
static int rapl_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config & RAPL_EVENT_MASK;
        int bit, msr, ret = 0;
        struct rapl_pmu *pmu;
        /* only look at RAPL events */
        if (event->attr.type != rapl_pmus->pmu.type)
                return -ENOENT;

        /* check only supported bits are set */
        if (event->attr.config & ~RAPL_EVENT_MASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        /*
         * check event is known (determines counter)
         */
        switch (cfg) {
        case INTEL_RAPL_PP0:
                bit = RAPL_IDX_PP0_NRG_STAT;
                msr = MSR_PP0_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PKG:
                bit = RAPL_IDX_PKG_NRG_STAT;
                msr = MSR_PKG_ENERGY_STATUS;
                break;
        case INTEL_RAPL_RAM:
                bit = RAPL_IDX_RAM_NRG_STAT;
                msr = MSR_DRAM_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PP1:
                bit = RAPL_IDX_PP1_NRG_STAT;
                msr = MSR_PP1_ENERGY_STATUS;
                break;
        default:
                return -EINVAL;
        }

        /* check event supported */
        if (!(rapl_cntr_mask & (1 << bit)))
                return -EINVAL;
        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;
        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
        event->hw.config = cfg;
        event->hw.idx = bit;

        return ret;
}
static void rapl_pmu_event_read(struct perf_event *event)
{
        rapl_event_update(event);
}
static ssize_t rapl_get_attr_cpumask(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
static struct attribute *rapl_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
        .attrs = rapl_pmu_attrs,
};
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_gpu_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
static struct attribute *rapl_events_srv_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};
static struct attribute *rapl_events_cln_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        NULL,
};
static struct attribute *rapl_events_hsw_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};
static struct attribute *rapl_events_knl_attr[] = {
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};
static struct attribute_group rapl_pmu_events_group = {
        .name = "events",
        .attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group rapl_pmu_format_group = {
        .name = "format",
        .attrs = rapl_formats_attr,
};

const struct attribute_group *rapl_attr_groups[] = {
        &rapl_pmu_attr_group,
        &rapl_pmu_format_group,
        &rapl_pmu_events_group,
        NULL,
};
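/*
 * Illustrative sysfs layout (assuming the PMU name "power" used at
 * registration below) produced by the three attribute groups:
 *
 *      /sys/bus/event_source/devices/power/cpumask
 *      /sys/bus/event_source/devices/power/format/event
 *      /sys/bus/event_source/devices/power/events/energy-pkg
 *      /sys/bus/event_source/devices/power/events/energy-pkg.scale
 *      /sys/bus/event_source/devices/power/events/energy-pkg.unit
 */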
static void rapl_cpu_exit(int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        /* Check if exiting cpu is used for collecting rapl events */
        if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
                return;

        pmu->cpu = -1;
        /* Find a new cpu to collect rapl events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate rapl events to the new target */
        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &rapl_cpu_mask);
                pmu->cpu = target;
                perf_pmu_migrate_context(pmu->pmu, cpu, target);
        }
}
static void rapl_cpu_init(int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
         */
        target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)
                return;

        cpumask_set_cpu(cpu, &rapl_cpu_mask);
        pmu->cpu = cpu;
}
static int rapl_cpu_prepare(int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);

        if (pmu)
                return 0;

        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
        if (!pmu)
                return -ENOMEM;

        raw_spin_lock_init(&pmu->lock);
        INIT_LIST_HEAD(&pmu->active_list);
        pmu->pmu = &rapl_pmus->pmu;
        pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
        pmu->cpu = -1;
        rapl_hrtimer_init(pmu);
        rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
        return 0;
}
static int rapl_cpu_notifier(struct notifier_block *self,
                             unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                rapl_cpu_prepare(cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                rapl_cpu_init(cpu);
                break;
        case CPU_DOWN_PREPARE:
                rapl_cpu_exit(cpu);
                break;
        }
        return NOTIFY_OK;
}
static int rapl_check_hw_unit(bool apply_quirk)
{
        u64 msr_rapl_power_unit_bits;
        int i;

        /* protect rdmsrl() to handle virtualization */
        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
        for (i = 0; i < NR_RAPL_DOMAINS; i++)
                rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

        /*
         * DRAM domain on HSW server and KNL has a fixed energy unit which can
         * be different from the unit in the power unit MSR. See
         * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
         * of 2. Datasheet, September 2014, Reference Number: 330784-001"
         */
        if (apply_quirk)
                rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

        /*
         * Calculate the timer rate:
         * Use reference of 200W for scaling the timeout to avoid counter
         * overflows. 200W = 200 Joules/sec
         * Divide interval by 2 to avoid lockstep (2 * 100)
         * if hw unit is 32, then we use 2 ms 1/200/2
         */
        rapl_timer_ms = 2;
        if (rapl_hw_unit[0] < 32) {
                rapl_timer_ms = (1000 / (2 * 100));
                rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
        }
        return 0;
}
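/*
 * Worked example (illustrative): with the common hw unit of 2^-16 J,
 * rapl_timer_ms = (1000 / (2 * 100)) * 2^(32 - 16 - 1)
 *               = 5 * 32768 = 163840 ms (~164 s).
 * A 32-bit counter in 2^-16 J units wraps after 2^16 J = 65536 J, which
 * at the 200 W reference takes ~328 s, so polling every ~164 s reads
 * the counter at least twice per wrap.
 */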
static void __init rapl_advertise(void)
{
        int i;

        pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
                hweight32(rapl_cntr_mask), rapl_timer_ms);

        for (i = 0; i < NR_RAPL_DOMAINS; i++) {
                if (rapl_cntr_mask & (1 << i)) {
                        pr_info("hw unit of domain %s 2^-%d Joules\n",
                                rapl_domain_names[i], rapl_hw_unit[i]);
                }
        }
}
static int __init rapl_prepare_cpus(void)
{
        unsigned int cpu, pkg;
        int ret;

        for_each_online_cpu(cpu) {
                pkg = topology_logical_package_id(cpu);
                if (rapl_pmus->pmus[pkg])
                        continue;

                ret = rapl_cpu_prepare(cpu);
                if (ret)
                        return ret;
                rapl_cpu_init(cpu);
        }
        return 0;
}
static void __init cleanup_rapl_pmus(void)
{
        int i;

        for (i = 0; i < rapl_pmus->maxpkg; i++)
                kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
}
static int __init init_rapl_pmus(void)
{
        int maxpkg = topology_max_packages();
        size_t size;

        size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
        rapl_pmus = kzalloc(size, GFP_KERNEL);
        if (!rapl_pmus)
                return -ENOMEM;

        rapl_pmus->maxpkg               = maxpkg;
        rapl_pmus->pmu.attr_groups      = rapl_attr_groups;
        rapl_pmus->pmu.task_ctx_nr      = perf_invalid_context;
        rapl_pmus->pmu.event_init       = rapl_pmu_event_init;
        rapl_pmus->pmu.add              = rapl_pmu_event_add;
        rapl_pmus->pmu.del              = rapl_pmu_event_del;
        rapl_pmus->pmu.start            = rapl_pmu_event_start;
        rapl_pmus->pmu.stop             = rapl_pmu_event_stop;
        rapl_pmus->pmu.read             = rapl_pmu_event_read;
        return 0;
}
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
        [1] = {},
};
static int __init rapl_pmu_init(void)
{
        bool apply_quirk = false;
        int ret;

        if (!x86_match_cpu(rapl_cpu_match))
                return -ENODEV;

        switch (boot_cpu_data.x86_model) {
        case 42: /* Sandy Bridge */
        case 58: /* Ivy Bridge */
                rapl_cntr_mask = RAPL_IDX_CLN;
                rapl_pmu_events_group.attrs = rapl_events_cln_attr;
                break;
        case 63: /* Haswell-Server */
        case 79: /* Broadwell-Server */
                apply_quirk = true;
                rapl_cntr_mask = RAPL_IDX_SRV;
                rapl_pmu_events_group.attrs = rapl_events_srv_attr;
                break;
        case 60: /* Haswell */
        case 69: /* Haswell-Celeron */
        case 70: /* Haswell GT3e */
        case 61: /* Broadwell */
        case 71: /* Broadwell-H */
                rapl_cntr_mask = RAPL_IDX_HSW;
                rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
                break;
        case 45: /* Sandy Bridge-EP */
        case 62: /* IvyTown */
                rapl_cntr_mask = RAPL_IDX_SRV;
                rapl_pmu_events_group.attrs = rapl_events_srv_attr;
                break;
        case 87: /* Knights Landing */
                apply_quirk = true;
                rapl_cntr_mask = RAPL_IDX_KNL;
                rapl_pmu_events_group.attrs = rapl_events_knl_attr;
                break;
        default:
                return -ENODEV;
        }
        ret = rapl_check_hw_unit(apply_quirk);
        if (ret)
                return ret;

        ret = init_rapl_pmus();
        if (ret)
                return ret;

        cpu_notifier_register_begin();

        ret = rapl_prepare_cpus();
        if (ret)
                goto out;

        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
                goto out;

        __perf_cpu_notifier(rapl_cpu_notifier);
        cpu_notifier_register_done();
        rapl_advertise();
        return 0;

out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
        cpu_notifier_register_done();
        return ret;
}
device_initcall(rapl_pmu_init);