#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
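
/*
 * Look up the physical package id for a PCI bus by walking the
 * pci2phy_map list under pci2phy_map_lock; returns -1 if the bus'
 * segment has no mapping.
 */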
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}
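
/*
 * Resolve the per-package box for this pmu from the cpu's logical
 * package id.
 */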
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;

		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}
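
/*
 * Program the control and counter register bases for the counter index
 * the event was assigned to, and record the assignment tag.
 */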
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupt to prevent uncore_pmu_event_start/stop
	 * to interrupt the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
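
/*
 * Allocate a box plus its trailing array of shared extra registers on
 * the requested NUMA node and initialize its locks, hrtimer and lists.
 */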
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
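
/*
 * Assign counters to the first n events of the box: the fast path
 * reuses previously assigned counters when the constraints still allow
 * it, otherwise fall back to the generic perf_assign_events() scheduler.
 */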
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}
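
/*
 * Start counting: free running counters are only tracked on the active
 * list, all other counters are programmed and enabled here.
 */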
void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
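
/*
 * Add an event to the box: collect it into the event list, (re)schedule
 * all collected events onto counters and start the ones that are not
 * explicitly held in the stopped state.
 */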
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
	 * Because the event and the free running counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time.
	 * So it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
uncore_get_attr_cpumask(struct device
*dev
,
773 struct device_attribute
*attr
, char *buf
)
775 return cpumap_print_to_pagebuf(true, buf
, &uncore_cpu_mask
);
778 static DEVICE_ATTR(cpumask
, S_IRUGO
, uncore_get_attr_cpumask
, NULL
);
780 static struct attribute
*uncore_pmu_attrs
[] = {
781 &dev_attr_cpumask
.attr
,
785 static const struct attribute_group uncore_pmu_attr_group
= {
786 .attrs
= uncore_pmu_attrs
,
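
/*
 * Register the perf_pmu for this uncore pmu, either from the generic
 * template below or from a type-specific struct pmu, and derive the
 * pmu name from the type name and box index.
 */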
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id	= setid ? i : -1;
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;

		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring units with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);

	box = pci_get_drvdata(pdev);
	if (!box) {
		pkg = topology_phys_to_logical_pkg(phys_id);
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[box->pkgid] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}
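
/*
 * Move ownership of all boxes of a type in the affected package from
 * old_cpu to new_cpu, migrating the perf context when both are valid.
 */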
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}
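
/*
 * Allocate any boxes that are still missing for this package on the
 * node of the incoming cpu, and only install them into the pmus once
 * all allocations have succeeded.
 */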
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int pkg, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->pkgid = pkg;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[pkg] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret, pkg, target;

	pkg = topology_logical_package_id(cpu);
	ret = allocate_boxes(types, pkg, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void	(*cpu_init)(void);
	int	(*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
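
/*
 * Module init: match the CPU model, run the model-specific MSR and PCI
 * setup and register the hotplug callbacks that manage the per-package
 * state.
 */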
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);