/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

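/*
 * Resulting counter index layout, assuming the default sizes above:
 * generic counters occupy idx 0..7, the fixed counter sits at idx 8
 * (UNCORE_PMC_IDX_FIXED = 8), the free running pseudo-index at idx 9
 * (UNCORE_PMC_IDX_FREERUNNING = 8 + 1), and UNCORE_PMC_IDX_MAX = 10.
 */
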
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

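/*
 * Worked example (illustrative values): UNCORE_PCI_DEV_FULL_DATA(0x1e, 1, 2, 0)
 * packs to (0x1e << 24) | (1 << 16) | (2 << 8) | 0 = 0x1e010200, and the
 * UNCORE_PCI_DEV_{DEV,FUNC,TYPE,IDX}() accessors recover 0x1e, 1, 2 and 0
 * from that word.
 */
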
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

#define UNCORE_IGNORE_END		-1

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		u64 *msr_offsets;
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	struct rb_root *boxes;
	/*
	 * Uncore PMU would store relevant platform topology configuration here
	 * to identify which platform component each PMON block of that type is
	 * supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
	/*
	 * Optional callbacks for extra uncore units cleanup
	 */
	void (*cleanup_extra_boxes)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8-byte apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die;
	int pmu_idx;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n) container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}

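/*
 * Usage sketch: event description tables are built from this macro, e.g.
 * (mirroring entries that appear in uncore_snb.c):
 *
 *	static struct uncore_event_desc snb_uncore_imc_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
 *		{ },
 *	};
 */
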
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

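/*
 * Usage sketch: DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 * (as used in the uncore platform files) generates __uncore_event_show()
 * and a 'format_attr_event' device attribute that prints "config:0-7".
 */
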
static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

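/*
 * Example (hypothetical base): with event_ctl = 0xd8 and
 * UNCORE_BOX_FLAG_CTL_OFFS8 set, the control register for idx 3 is
 * 3 * 8 + 0xd8 = 0xf0; without the flag it would be 3 * 4 + 0xd8 = 0xe4.
 */
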
static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
	       pmu->type->msr_offsets[pmu->pmu_idx] :
	       pmu->type->msr_offset * pmu->pmu_idx;
}

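/*
 * Example (hypothetical values): a type with msr_offset 0x10 and no
 * msr_offsets array places the box with pmu_idx 2 at offset 0x10 * 2 = 0x20;
 * types with irregular spacing supply a per-box msr_offsets[] array instead.
 */
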
static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter and a free running
 * counter, and different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It has the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */

static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}

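/*
 * Worked example (illustrative): for the SKX 'bw_in_port2' event above,
 * config = event 0xff | umask 0x22 << 8 = 0x22ff, so
 *	uncore_freerunning_idx(0x22ff)	= (0x22 & 0xf)			= 2
 *	uncore_freerunning_type(0x22ff) = (((0x22 - 0x10) >> 4) & 0xf)	= 1
 * i.e. the third counter (idx 2) of the second free running type (type 1).
 */
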
static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
		pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
		pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

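/*
 * Address computation sketch with hypothetical values: for a free running
 * type with counter_base 0xa00, counter_offset 8, no box_offsets array and
 * box_offset 0x10, counter idx 2 on the box with pmu_idx 1 resolves to
 * 0xa00 + 8 * 2 + 0x10 * 1 = 0xa20.
 */
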
static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

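/*
 * Example: cfg = 0x22ff qualifies, since (0x22ff & 0xff) == 0xff and its
 * umask 0x22 >= UNCORE_FREERUNNING_UMASK_START; a fixed counter event such
 * as 0x00ff does not, because its umask 0x00 is below 0x10.
 */
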
/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
	else
		return -EINVAL;

	return 0;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
extern int spr_uncore_units_ignore[];
extern int gnr_uncore_units_ignore[];

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void lnl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
void lnl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);