// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;
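
/*
 * Uncore PMUs are per package/die rather than per logical CPU, so a single
 * CPU in each die is designated to carry the events and read the counters;
 * the mask above tracks those designated CPUs and is updated from the CPU
 * hotplug callbacks further down in this file.
 */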

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

int uncore_pcibus_to_physid(struct pci_bus *bus)
{
        struct pci2phy_map *map;
        int phys_id = -1;

        raw_spin_lock(&pci2phy_map_lock);
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == pci_domain_nr(bus)) {
                        phys_id = map->pbus_to_physid[bus->number];
                        break;
                }
        }
        raw_spin_unlock(&pci2phy_map_lock);

        return phys_id;
}

static void uncore_free_pcibus_map(void)
{
        struct pci2phy_map *map, *tmp;

        list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
                list_del(&map->list);
                kfree(map);
        }
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
        struct pci2phy_map *map, *alloc = NULL;
        int i;

        lockdep_assert_held(&pci2phy_map_lock);

lookup:
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == segment)
                        goto end;
        }

        if (!alloc) {
                raw_spin_unlock(&pci2phy_map_lock);
                alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
                raw_spin_lock(&pci2phy_map_lock);

                if (!alloc)
                        return NULL;

                goto lookup;
        }

        map = alloc;
        alloc = NULL;
        map->segment = segment;
        for (i = 0; i < 256; i++)
                map->pbus_to_physid[i] = -1;
        list_add_tail(&map->list, &pci2phy_map_head);

end:
        kfree(alloc);
        return map;
}
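
/*
 * Each pci2phy_map caches, for one PCI segment, the bus number (0-255) to
 * physical package id mapping; entries start out as -1 and are expected to
 * be filled in by the platform specific PCI setup code.
 */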

ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
        unsigned int dieid = topology_logical_die_id(cpu);

        /*
         * The unsigned check also catches the '-1' return value for
         * non-existent mappings in the topology map.
         */
        return dieid < max_dies ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
        if (box->io_addr)
                iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
                             struct perf_event *event)
{
        if (!box->io_addr)
                return 0;

        return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;

                return NULL;
        }

        return &uncore_constraint_empty;
}
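
/*
 * The reference counting above lets events that program an identical
 * match/mask value share the extra register; an event with a conflicting
 * value gets uncore_constraint_empty back and therefore cannot be
 * scheduled until the register is released again.
 */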

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put the constraint if the extra reg was actually allocated.
         * This also takes care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
                                   struct perf_event *event, int idx)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        hwc->last_tag = ++box->tags[idx];

        if (uncore_pmc_fixed(hwc->idx)) {
                hwc->event_base = uncore_fixed_ctr(box);
                hwc->config_base = uncore_fixed_ctl(box);
                return;
        }

        hwc->config_base = uncore_event_ctl(box, hwc->idx);
        hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 prev_count, new_count, delta;
        int shift;

        if (uncore_pmc_freerunning(event->hw.idx))
                shift = 64 - uncore_freerunning_bits(box, event);
        else if (uncore_pmc_fixed(event->hw.idx))
                shift = 64 - uncore_fixed_ctr_bits(box);
        else
                shift = 64 - uncore_perf_ctr_bits(box);

        /* the hrtimer might modify the previous event value */
again:
        prev_count = local64_read(&event->hw.prev_count);
        new_count = uncore_read_counter(box, event);
        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
                goto again;

        delta = (new_count << shift) - (prev_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
}
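
/*
 * Shifting both samples up by (64 - counter width) before subtracting makes
 * the unsigned arithmetic wrap at the hardware counter width, so a counter
 * rollover between the two reads still yields the correct delta once the
 * result is shifted back down.
 */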

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
        struct intel_uncore_box *box;
        struct perf_event *event;
        unsigned long flags;
        int bit;

        box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
        if (!box->n_active || box->cpu != smp_processor_id())
                return HRTIMER_NORESTART;
        /*
         * disable local interrupts to prevent uncore_pmu_event_start/stop
         * from interrupting the update process
         */
        local_irq_save(flags);

        /*
         * handle boxes with an active event list as opposed to active
         * counters
         */
        list_for_each_entry(event, &box->active_list, active_entry) {
                uncore_perf_event_update(box, event);
        }

        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
                uncore_perf_event_update(box, box->events[bit]);

        local_irq_restore(flags);

        hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
        return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
                      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
                                                 int node)
{
        int i, size, numshared = type->num_shared_regs;
        struct intel_uncore_box *box;

        size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

        box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;

        for (i = 0; i < numshared; i++)
                raw_spin_lock_init(&box->shared_regs[i].lock);

        uncore_pmu_init_hrtimer(box);
        box->cpu = -1;
        box->pci_phys_id = -1;
        box->dieid = -1;

        /* set default hrtimer timeout */
        box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

        INIT_LIST_HEAD(&box->active_list);

        return box;
}

/*
 * Use the pmu::event_init callback, uncore_pmu_event_init(), as a
 * detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
        return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
                      bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = box->pmu->type->num_counters;
        if (box->pmu->type->fixed_ctl)
                max_count++;

        if (box->n_events >= max_count)
                return -EINVAL;

        n = box->n_events;

        if (is_box_event(box, leader)) {
                box->event_list[n] = leader;
                n++;
        }

        if (!dogrp)
                return n;

        for_each_sibling_event(event, leader) {
                if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                box->event_list[n] = event;
                n++;
        }
        return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_type *type = box->pmu->type;
        struct event_constraint *c;

        if (type->ops->get_constraint) {
                c = type->ops->get_constraint(box, event);
                if (c)
                        return c;
        }

        if (event->attr.config == UNCORE_FIXED_EVENT)
                return &uncore_constraint_fixed;

        if (type->constraints) {
                for_each_event_constraint(c, type->constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        if (box->pmu->type->ops->put_constraint)
                box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
        unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        struct event_constraint *c;
        int i, wmin, wmax, ret = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = uncore_get_event_constraint(box, box->event_list[i]);
                box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
                c = box->event_constraint[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        /* slow path */
        if (i != n)
                ret = perf_assign_events(box->event_constraint, n,
                                         wmin, wmax, n, assign);

        if (!assign || ret) {
                for (i = 0; i < n; i++)
                        uncore_put_event_constraint(box, box->event_list[i]);
        }
        return ret ? -EINVAL : 0;
}
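
/*
 * Scheduling strategy: first try to keep every event on the counter it
 * already occupies (the fastpath above); only when that fails is the
 * generic perf_assign_events() solver used to compute a fresh assignment.
 */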

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
                return;

        /*
         * Free running counter is read-only and always active.
         * Use the current counter value as start point.
         * There is no overflow interrupt for free running counter.
         * Use hrtimer to periodically poll the counter to avoid overflow.
         */
        if (uncore_pmc_freerunning(event->hw.idx)) {
                list_add_tail(&event->active_entry, &box->active_list);
                local64_set(&event->hw.prev_count,
                            uncore_read_counter(box, event));
                if (box->n_active++ == 0)
                        uncore_pmu_start_hrtimer(box);
                return;
        }

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->events[idx] = event;
        box->n_active++;
        __set_bit(idx, box->active_mask);

        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        /* Cannot disable free running counter which is read-only */
        if (uncore_pmc_freerunning(hwc->idx)) {
                list_del(&event->active_entry);
                if (--box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
                uncore_perf_event_update(box, event);
                return;
        }

        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
                uncore_disable_event(box, event);
                box->n_active--;
                box->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

int uncore_pmu_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
        int assign[UNCORE_PMC_IDX_MAX];
        int i, n, ret;

        if (!box)
                return -ENODEV;

        /*
         * The free running counter is assigned in event_init().
         * The free running counter event and free running counter
         * are 1:1 mapped. It doesn't need to be tracked in event_list.
         */
        if (uncore_pmc_freerunning(hwc->idx)) {
                if (flags & PERF_EF_START)
                        uncore_pmu_event_start(event, 0);
                return 0;
        }

        ret = n = uncore_collect_events(box, event, false);
        if (ret < 0)
                return ret;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        ret = uncore_assign_events(box, assign, n);
        if (ret)
                return ret;

        /* save events moving to new counters */
        for (i = 0; i < box->n_events; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx == assign[i] &&
                    hwc->last_tag == box->tags[assign[i]])
                        continue;
                /*
                 * Ensure we don't accidentally enable a stopped
                 * counter simply because we rescheduled.
                 */
                if (hwc->state & PERF_HES_STOPPED)
                        hwc->state |= PERF_HES_ARCH;

                uncore_pmu_event_stop(event, PERF_EF_UPDATE);
        }

        /* reprogram moved events into new counters */
        for (i = 0; i < n; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx != assign[i] ||
                    hwc->last_tag != box->tags[assign[i]])
                        uncore_assign_hw_event(box, event, assign[i]);
                else if (i < box->n_events)
                        continue;

                if (hwc->state & PERF_HES_ARCH)
                        continue;

                uncore_pmu_event_start(event, 0);
        }
        box->n_events = n;

        return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        uncore_pmu_event_stop(event, PERF_EF_UPDATE);

        /*
         * The event for a free running counter is not tracked by event_list.
         * It doesn't need to force event->hw.idx = -1 to reassign the counter,
         * because the event and the free running counter are 1:1 mapped.
         */
        if (uncore_pmc_freerunning(event->hw.idx))
                return;

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        uncore_put_event_constraint(box, event);

                        for (++i; i < box->n_events; i++)
                                box->event_list[i - 1] = box->event_list[i];

                        --box->n_events;
                        break;
                }
        }

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
                                 struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;

        /* The free running counter is always active. */
        if (uncore_pmc_freerunning(event->hw.idx))
                return 0;

        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;

        fake_box->pmu = pmu;
        /*
         * the event is not yet connected with its
         * siblings therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
        n = uncore_collect_events(fake_box, leader, true);
        if (n < 0)
                goto out;

        fake_box->n_events = n;
        n = uncore_collect_events(fake_box, event, false);
        if (n < 0)
                goto out;

        fake_box->n_events = n;

        ret = uncore_assign_events(fake_box, NULL, n);
out:
        kfree(fake_box);
        return ret;
}
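
/*
 * The fake box never touches hardware: uncore_assign_events() is run
 * against it purely to check whether the proposed group could be
 * scheduled, and the box is thrown away afterwards.
 */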

static int uncore_pmu_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;
        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;
        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        if (event->attr.config == UNCORE_FIXED_EVENT) {
                /* no fixed counter */
                if (!pmu->type->fixed_ctl)
                        return -EINVAL;
                /*
                 * if there is only one fixed counter, only the first pmu
                 * can access the fixed counter
                 */
                if (pmu->type->single_fixed && pmu->pmu_idx > 0)
                        return -EINVAL;

                /* fixed counters have event field hardcoded to zero */
                hwc->config = 0ULL;
        } else if (is_freerunning_event(event)) {
                hwc->config = event->attr.config;
                if (!check_valid_freerunning_event(box, event))
                        return -EINVAL;
                event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
                /*
                 * The free running counter event and free running counter
                 * are always 1:1 mapped.
                 * The free running counter is always active.
                 * Assign the free running counter here.
                 */
                event->hw.event_base = uncore_freerunning_counter(box, event);
        } else {
                hwc->config = event->attr.config &
                              (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
                if (pmu->type->ops->hw_config) {
                        ret = pmu->type->ops->hw_config(box, event);
                        if (ret)
                                return ret;
                }
        }

        if (event->group_leader != event)
                ret = uncore_validate_group(pmu, event);
        else
                ret = 0;

        return ret;
}
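
/*
 * Illustrative note (not part of this driver): a user would open one of
 * these events by setting perf_event_attr::type to the dynamic type value
 * exposed in /sys/bus/event_source/devices/uncore_<name>/type and binding
 * the event to a CPU (cpu >= 0, pid == -1), since sampling and per-task
 * counting are rejected above.
 */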

static void uncore_pmu_enable(struct pmu *pmu)
{
        struct intel_uncore_pmu *uncore_pmu;
        struct intel_uncore_box *box;

        uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
        if (!uncore_pmu)
                return;

        box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
        if (!box)
                return;

        if (uncore_pmu->type->ops->enable_box)
                uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
        struct intel_uncore_pmu *uncore_pmu;
        struct intel_uncore_box *box;

        uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
        if (!uncore_pmu)
                return;

        box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
        if (!box)
                return;

        if (uncore_pmu->type->ops->disable_box)
                uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
        int ret;

        if (!pmu->type->pmu) {
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
                        .pmu_enable     = uncore_pmu_enable,
                        .pmu_disable    = uncore_pmu_disable,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
                        .start          = uncore_pmu_event_start,
                        .stop           = uncore_pmu_event_stop,
                        .read           = uncore_pmu_event_read,
                        .module         = THIS_MODULE,
                        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
                };
        } else {
                pmu->pmu = *pmu->type->pmu;
                pmu->pmu.attr_groups = pmu->type->attr_groups;
        }

        if (pmu->type->num_boxes == 1) {
                if (strlen(pmu->type->name) > 0)
                        sprintf(pmu->name, "uncore_%s", pmu->type->name);
                else
                        sprintf(pmu->name, "uncore");
        } else {
                sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
                        pmu->pmu_idx);
        }

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (!ret)
                pmu->registered = true;
        return ret;
}
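
/*
 * PMU naming: a type with a single box is exposed as "uncore_<type>" (or
 * plain "uncore" when the type has no name), while multi-box types get one
 * PMU per box named "uncore_<type>_<index>".
 */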

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
        if (!pmu->registered)
                return;
        perf_pmu_unregister(&pmu->pmu);
        pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
        int die;

        for (die = 0; die < max_dies; die++)
                kfree(pmu->boxes[die]);
        kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        int i;

        if (pmu) {
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        uncore_pmu_unregister(pmu);
                        uncore_free_boxes(pmu);
                }
                kfree(type->pmus);
                type->pmus = NULL;
        }
        kfree(type->events_group);
        type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
        for (; *types; types++)
                uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
        struct intel_uncore_pmu *pmus;
        size_t size;
        int i, j;

        pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
        if (!pmus)
                return -ENOMEM;

        size = max_dies * sizeof(struct intel_uncore_box *);

        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = setid ? i : -1;
                pmus[i].pmu_idx = i;
                pmus[i].type    = type;
                pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
                if (!pmus[i].boxes)
                        goto err;
        }

        type->pmus = pmus;
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
                                   0, type->num_counters, 0, 0);

        if (type->event_descs) {
                struct {
                        struct attribute_group group;
                        struct attribute *attrs[];
                } *attr_group;
                for (i = 0; type->event_descs[i].attr.attr.name; i++);

                attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
                                     GFP_KERNEL);
                if (!attr_group)
                        goto err;

                attr_group->group.name = "events";
                attr_group->group.attrs = attr_group->attrs;

                for (j = 0; j < i; j++)
                        attr_group->attrs[j] = &type->event_descs[j].attr.attr;

                type->events_group = &attr_group->group;
        }

        type->pmu_group = &uncore_pmu_attr_group;

        return 0;

err:
        for (i = 0; i < type->num_boxes; i++)
                kfree(pmus[i].boxes);
        kfree(pmus);

        return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
        int ret;

        for (; *types; types++) {
                ret = uncore_type_init(*types, setid);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu = NULL;
        struct intel_uncore_box *box;
        int phys_id, die, ret;

        phys_id = uncore_pcibus_to_physid(pdev->bus);
        if (phys_id < 0)
                return -ENODEV;

        die = (topology_max_die_per_package() > 1) ? phys_id :
                        topology_phys_to_logical_pkg(phys_id);
        if (die < 0)
                return -EINVAL;

        if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
                int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

                uncore_extra_pci_dev[die].dev[idx] = pdev;
                pci_set_drvdata(pdev, NULL);
                return 0;
        }

        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

        /*
         * Some platforms, e.g. Knights Landing, use a common PCI device ID
         * for multiple instances of an uncore PMU device type, so the PCI
         * slot and function are needed to identify the uncore box.
         */
        if (id->driver_data & ~0xffff) {
                struct pci_driver *pci_drv = pdev->driver;
                const struct pci_device_id *ids = pci_drv->id_table;
                unsigned int devfn;

                while (ids && ids->vendor) {
                        if ((ids->vendor == pdev->vendor) &&
                            (ids->device == pdev->device)) {
                                devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
                                                  UNCORE_PCI_DEV_FUNC(ids->driver_data));
                                if (devfn == pdev->devfn) {
                                        pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
                                        break;
                                }
                        }
                        ids++;
                }
                if (pmu == NULL)
                        return -ENODEV;
        } else {
                /*
                 * For a performance monitoring unit with multiple boxes,
                 * each box has a different function id.
                 */
                pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
        }

        if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
                return -EINVAL;

        box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;

        if (pmu->func_id < 0)
                pmu->func_id = pdev->devfn;
        else
                WARN_ON_ONCE(pmu->func_id != pdev->devfn);

        atomic_inc(&box->refcnt);
        box->pci_phys_id = phys_id;
        box->dieid = die;
        box->pci_dev = pdev;
        box->pmu = pmu;
        uncore_box_init(box);
        pci_set_drvdata(pdev, box);

        pmu->boxes[die] = box;
        if (atomic_inc_return(&pmu->activeboxes) > 1)
                return 0;

        /* First active box registers the pmu */
        ret = uncore_pmu_register(pmu);
        if (ret) {
                pci_set_drvdata(pdev, NULL);
                pmu->boxes[die] = NULL;
                uncore_box_exit(box);
                kfree(box);
        }
        return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
        struct intel_uncore_box *box;
        struct intel_uncore_pmu *pmu;
        int i, phys_id, die;

        phys_id = uncore_pcibus_to_physid(pdev->bus);

        box = pci_get_drvdata(pdev);
        if (!box) {
                die = (topology_max_die_per_package() > 1) ? phys_id :
                                topology_phys_to_logical_pkg(phys_id);
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (uncore_extra_pci_dev[die].dev[i] == pdev) {
                                uncore_extra_pci_dev[die].dev[i] = NULL;
                                break;
                        }
                }
                WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
                return;
        }

        pmu = box->pmu;
        if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
                return;

        pci_set_drvdata(pdev, NULL);
        pmu->boxes[box->dieid] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);
        kfree(box);
}

static int __init uncore_pci_init(void)
{
        size_t size;
        int ret;

        size = max_dies * sizeof(struct pci_extra_dev);
        uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
        if (!uncore_extra_pci_dev) {
                ret = -ENOMEM;
                goto err;
        }

        ret = uncore_types_init(uncore_pci_uncores, false);
        if (ret)
                goto errtype;

        uncore_pci_driver->probe = uncore_pci_probe;
        uncore_pci_driver->remove = uncore_pci_remove;

        ret = pci_register_driver(uncore_pci_driver);
        if (ret)
                goto errtype;

        pcidrv_registered = true;
        return 0;

errtype:
        uncore_types_exit(uncore_pci_uncores);
        kfree(uncore_extra_pci_dev);
        uncore_extra_pci_dev = NULL;
        uncore_free_pcibus_map();
err:
        uncore_pci_uncores = empty_uncore;
        return ret;
}

static void uncore_pci_exit(void)
{
        if (pcidrv_registered) {
                pcidrv_registered = false;
                pci_unregister_driver(uncore_pci_driver);
                uncore_types_exit(uncore_pci_uncores);
                kfree(uncore_extra_pci_dev);
                uncore_free_pcibus_map();
        }
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        struct intel_uncore_box *box;
        int i, die;

        die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
                box = pmu->boxes[die];
                if (!box)
                        continue;

                if (old_cpu < 0) {
                        WARN_ON_ONCE(box->cpu != -1);
                        box->cpu = new_cpu;
                        continue;
                }

                WARN_ON_ONCE(box->cpu != old_cpu);
                box->cpu = -1;
                if (new_cpu < 0)
                        continue;

                uncore_pmu_cancel_hrtimer(box);
                perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
                box->cpu = new_cpu;
        }
}

static void uncore_change_context(struct intel_uncore_type **uncores,
                                  int old_cpu, int new_cpu)
{
        for (; *uncores; uncores++)
                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i;

        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[id];
                        if (box && atomic_dec_return(&box->refcnt) == 0)
                                uncore_box_exit(box);
                }
        }
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
        int die, target;

        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

        /* Migrate uncore events to the new target */
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &uncore_cpu_mask);
        else
                target = -1;

        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_mmio_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
        /* Clear the references */
        die = topology_logical_die_id(cpu);
        uncore_box_unref(uncore_msr_uncores, die);
        uncore_box_unref(uncore_mmio_uncores, die);
        return 0;
}
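
/*
 * On hotplug, events are migrated wholesale with perf_pmu_migrate_context()
 * to another online CPU in the same die (if one remains) and the per-box
 * reference counts are dropped so unused boxes can be shut down.
 */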

static int allocate_boxes(struct intel_uncore_type **types,
                          unsigned int die, unsigned int cpu)
{
        struct intel_uncore_box *box, *tmp;
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        LIST_HEAD(allocated);
        int i;

        /* Try to allocate all required boxes */
        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        if (pmu->boxes[die])
                                continue;
                        box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                goto cleanup;
                        box->pmu = pmu;
                        box->dieid = die;
                        list_add(&box->active_list, &allocated);
                }
        }
        /* Install them in the pmus */
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                box->pmu->boxes[die] = box;
        }
        return 0;

cleanup:
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                kfree(box);
        }
        return -ENOMEM;
}

static int uncore_box_ref(struct intel_uncore_type **types,
                          int id, unsigned int cpu)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, ret;

        ret = allocate_boxes(types, id, cpu);
        if (ret)
                return ret;

        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[id];
                        if (box && atomic_inc_return(&box->refcnt) == 1)
                                uncore_box_init(box);
                }
        }
        return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
        int die, target, msr_ret, mmio_ret;

        die = topology_logical_die_id(cpu);
        msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
        mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
        if (msr_ret && mmio_ret)
                return -ENOMEM;

        /*
         * Check if there is an online cpu in the package
         * which collects uncore events already.
         */
        target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        if (!msr_ret)
                uncore_change_context(uncore_msr_uncores, -1, cpu);
        if (!mmio_ret)
                uncore_change_context(uncore_mmio_uncores, -1, cpu);
        uncore_change_context(uncore_pci_uncores, -1, cpu);
        return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
        int i, ret;

        for (i = 0; i < type->num_boxes; i++) {
                ret = uncore_pmu_register(&type->pmus[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_msr_pmus_register(void)
{
        struct intel_uncore_type **types = uncore_msr_uncores;
        int ret;

        for (; *types; types++) {
                ret = type_pmu_register(*types);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_cpu_init(void)
{
        int ret;

        ret = uncore_types_init(uncore_msr_uncores, true);
        if (ret)
                goto err;

        ret = uncore_msr_pmus_register();
        if (ret)
                goto err;
        return 0;
err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_msr_uncores = empty_uncore;
        return ret;
}

static int __init uncore_mmio_init(void)
{
        struct intel_uncore_type **types = uncore_mmio_uncores;
        int ret;

        ret = uncore_types_init(types, true);
        if (ret)
                goto err;

        for (; *types; types++) {
                ret = type_pmu_register(*types);
                if (ret)
                        goto err;
        }
        return 0;
err:
        uncore_types_exit(uncore_mmio_uncores);
        uncore_mmio_uncores = empty_uncore;
        return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)     \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
        void    (*cpu_init)(void);
        int     (*pci_init)(void);
        void    (*mmio_init)(void);
};
= {
1406 .cpu_init
= nhm_uncore_cpu_init
,
1409 static const struct intel_uncore_init_fun snb_uncore_init __initconst
= {
1410 .cpu_init
= snb_uncore_cpu_init
,
1411 .pci_init
= snb_uncore_pci_init
,
1414 static const struct intel_uncore_init_fun ivb_uncore_init __initconst
= {
1415 .cpu_init
= snb_uncore_cpu_init
,
1416 .pci_init
= ivb_uncore_pci_init
,
1419 static const struct intel_uncore_init_fun hsw_uncore_init __initconst
= {
1420 .cpu_init
= snb_uncore_cpu_init
,
1421 .pci_init
= hsw_uncore_pci_init
,
1424 static const struct intel_uncore_init_fun bdw_uncore_init __initconst
= {
1425 .cpu_init
= snb_uncore_cpu_init
,
1426 .pci_init
= bdw_uncore_pci_init
,
1429 static const struct intel_uncore_init_fun snbep_uncore_init __initconst
= {
1430 .cpu_init
= snbep_uncore_cpu_init
,
1431 .pci_init
= snbep_uncore_pci_init
,
1434 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst
= {
1435 .cpu_init
= nhmex_uncore_cpu_init
,
1438 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst
= {
1439 .cpu_init
= ivbep_uncore_cpu_init
,
1440 .pci_init
= ivbep_uncore_pci_init
,
1443 static const struct intel_uncore_init_fun hswep_uncore_init __initconst
= {
1444 .cpu_init
= hswep_uncore_cpu_init
,
1445 .pci_init
= hswep_uncore_pci_init
,
1448 static const struct intel_uncore_init_fun bdx_uncore_init __initconst
= {
1449 .cpu_init
= bdx_uncore_cpu_init
,
1450 .pci_init
= bdx_uncore_pci_init
,
1453 static const struct intel_uncore_init_fun knl_uncore_init __initconst
= {
1454 .cpu_init
= knl_uncore_cpu_init
,
1455 .pci_init
= knl_uncore_pci_init
,
1458 static const struct intel_uncore_init_fun skl_uncore_init __initconst
= {
1459 .cpu_init
= skl_uncore_cpu_init
,
1460 .pci_init
= skl_uncore_pci_init
,
1463 static const struct intel_uncore_init_fun skx_uncore_init __initconst
= {
1464 .cpu_init
= skx_uncore_cpu_init
,
1465 .pci_init
= skx_uncore_pci_init
,
1468 static const struct intel_uncore_init_fun icl_uncore_init __initconst
= {
1469 .cpu_init
= icl_uncore_cpu_init
,
1470 .pci_init
= skl_uncore_pci_init
,
1473 static const struct intel_uncore_init_fun snr_uncore_init __initconst
= {
1474 .cpu_init
= snr_uncore_cpu_init
,
1475 .pci_init
= snr_uncore_pci_init
,
1476 .mmio_init
= snr_uncore_mmio_init
,

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL,        hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L,      hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_G,      hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL,      bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_G,    bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_D,    bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,   knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE,        skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L,      skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L,     skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE,       skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,      icl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI,   icl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE,        icl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init),
        {},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        struct intel_uncore_init_fun *uncore_init;
        int pret = 0, cret = 0, mret = 0, ret;

        id = x86_match_cpu(intel_uncore_match);
        if (!id)
                return -ENODEV;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        max_dies = topology_max_packages() * topology_max_die_per_package();

        uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
                if (!pret)
                        pret = uncore_pci_init();
        }

        if (uncore_init->cpu_init) {
                uncore_init->cpu_init();
                cret = uncore_cpu_init();
        }

        if (uncore_init->mmio_init) {
                uncore_init->mmio_init();
                mret = uncore_mmio_init();
        }

        if (cret && pret && mret)
                return -ENODEV;

        /* Install hotplug callbacks to setup the targets for each package */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
                                "perf/x86/intel/uncore:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret)
                goto err;
        return 0;

err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_types_exit(uncore_mmio_uncores);
        uncore_pci_exit();
        return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
        cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_types_exit(uncore_mmio_uncores);
        uncore_pci_exit();
}
module_exit(intel_uncore_exit);