/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption,
 * however here we only expose the energy consumption free running
 * counters (pp0, pkg, dram, and on some models gpu and psys).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *
 * dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *	  event: rapl_energy_gpu
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *	  event: rapl_energy_psys
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
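/*
 * Illustration only (userspace sketch, not part of this driver): given a
 * raw 32.32 fixed-point count read over some interval, a tool might do
 *
 *	#include <math.h>
 *	double joules = ldexp((double)raw_count, -32);
 *	double watts  = joules / interval_seconds;
 *
 * where raw_count and interval_seconds are hypothetical names for the
 * tool's own perf read-out and measurement window.
 */
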
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};

/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
			  1<<RAPL_IDX_PKG_NRG_STAT|\
			  1<<RAPL_IDX_RAM_NRG_STAT|\
			  1<<RAPL_IDX_PP1_NRG_STAT|\
			  1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL
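/*
 * For example, attr->config == 0x02 selects the package counter
 * (INTEL_RAPL_PKG above); any bit set outside the low 8 makes
 * rapl_pmu_event_init() reject the event with -EINVAL.
 */
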
#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)

#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
};

struct rapl_pmu {
	raw_spinlock_t		lock;
	int			n_active;
	int			cpu;
	struct list_head	active_list;
	struct pmu		*pmu;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;
};

struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		maxpkg;
	struct rapl_pmu		*pmus[];
};

 /* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;

	rdmsrl(event->hw.event_base, raw);
	return raw;
}

static inline u64 rapl_scale(u64 v, int cfg)
{
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * scale delta to smallest unit (1/2^32)
	 * users must then scale back: count * 1/2^32 to get Joules,
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
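
/*
 * Worked example (assuming the SandyBridge-style 2^-16 Joules hw unit
 * mentioned in the header comment): rapl_hw_unit[cfg - 1] == 16, so
 *	v << (32 - 16) == v * 2^16
 * converts a count of 2^-16 J increments into the fixed 2^-32 J API unit.
 */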

static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
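
/*
 * The shift pair above handles 32-bit wraparound. Sketch of the
 * arithmetic: a counter going from 0xffffffff to 0x00000003 gives
 *	((0x3ULL << 32) - (0xffffffffULL << 32)) >> 32 == 4
 * because bits above the physical counter width fall out of the u64
 * subtraction before the signed shift back down.
 */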

static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}
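
/*
 * Note: the hrtimer does not drive sampling (there is none); it only folds
 * the 32-bit free-running MSRs into the 64-bit perf counts often enough
 * that rapl_event_update() never sees more than one wraparound. See
 * rapl_check_hw_unit() for how timer_interval is sized.
 */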

static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/*
	 * check event is known (determines counter)
	 */
	switch (cfg) {
	case INTEL_RAPL_PP0:
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PKG:
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case INTEL_RAPL_RAM:
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PP1:
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;
		break;
	default:
		return -EINVAL;
	}
	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}

static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}

static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg,   rapl_pkg,   "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram,   rapl_ram,   "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu,   rapl_gpu,   "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,  rapl_psys,  "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit,   rapl_pkg_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit,   rapl_ram_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit,   rapl_gpu_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,  rapl_psys_unit,  "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,   rapl_pkg_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,   rapl_ram_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,   rapl_gpu_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,  rapl_psys_scale,  "2.3283064365386962890625e-10");

static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};

static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};

static struct attribute *rapl_events_knl_attr[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};

static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}

static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;

		raw_spin_lock_init(&pmu->lock);
		INIT_LIST_HEAD(&pmu->active_list);
		pmu->pmu = &rapl_pmus->pmu;
		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
		rapl_hrtimer_init(pmu);

		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
	}

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
	pmu->cpu = cpu;
	return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
	u64 msr_rapl_power_unit_bits;
	int i;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
		return -1;
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

	/*
	 * DRAM domain on HSW server and KNL has fixed energy unit which can be
	 * different than the unit from power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
	 */
	if (apply_quirk)
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec
	 * Divide interval by 2 to avoid lockstep (2 * 100)
	 * if hw unit is 32, then we use 2 ms 1/200/2
	 */
	rapl_timer_ms = 2;
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
	}
	return 0;
}
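
/*
 * Worked example of the timer sizing (sketch): with a 2^-16 J hw unit,
 * rapl_timer_ms = 1000 / (2 * 100) = 5, then 5 * 2^(32 - 16 - 1) ms,
 * i.e. ~164 seconds. At the 200 W reference the 32-bit counter takes
 * roughly twice that long to wrap, so the update comfortably beats the
 * overflow.
 */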

static void __init rapl_advertise(void)
{
	int i;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
		}
	}
}

static void cleanup_rapl_pmus(void)
{
	int i;

	for (i = 0; i < rapl_pmus->maxpkg; i++)
		kfree(rapl_pmus->pmus[i]);
	kfree(rapl_pmus);
}

static int __init init_rapl_pmus(void)
{
	int maxpkg = topology_max_packages();
	size_t size;

	size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->maxpkg		= maxpkg;
	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
	rapl_pmus->pmu.add		= rapl_pmu_event_add;
	rapl_pmus->pmu.del		= rapl_pmu_event_del;
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
	rapl_pmus->pmu.module		= THIS_MODULE;
	return 0;
}

#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
	bool apply_quirk;
	int cntr_mask;
	struct attribute **attrs;
};

static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs	= rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs	= rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs	= rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs	= rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_KNL,
	.attrs	= rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs	= rapl_events_skl_attr,
};

static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);

static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;
	bool apply_quirk;
	int ret;

	id = x86_match_cpu(rapl_cpu_match);
	if (!id)
		return -ENODEV;

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);