1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/slab.h>
4 #include <asm/apicdef.h>
6 #include <linux/perf_event.h>
7 #include "../perf_event.h"
9 #define UNCORE_PMU_NAME_LEN 32
10 #define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
11 #define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)
13 #define UNCORE_FIXED_EVENT 0xff
14 #define UNCORE_PMC_IDX_MAX_GENERIC 8
15 #define UNCORE_PMC_IDX_MAX_FIXED 1
16 #define UNCORE_PMC_IDX_MAX_FREERUNNING 1
17 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
18 #define UNCORE_PMC_IDX_FREERUNNING (UNCORE_PMC_IDX_FIXED + \
19 UNCORE_PMC_IDX_MAX_FIXED)
20 #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FREERUNNING + \
21 UNCORE_PMC_IDX_MAX_FREERUNNING)
/*
 * Pack/unpack a PCI uncore device descriptor into one 32-bit word:
 * [31:24] device, [23:16] function, [15:8] box type, [7:0] box index.
 * All macro arguments are parenthesized so expression arguments
 * (e.g. idx + 1) expand safely.
 */
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		(((dev) << 24) | ((func) << 16) | ((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))
#define UNCORE_PCI_DEV_DEV(data)	(((data) >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	(((data) >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
30 #define UNCORE_EXTRA_PCI_DEV 0xff
31 #define UNCORE_EXTRA_PCI_DEV_MAX 4
33 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
35 struct pci_extra_dev
{
36 struct pci_dev
*dev
[UNCORE_EXTRA_PCI_DEV_MAX
];
39 struct intel_uncore_ops
;
40 struct intel_uncore_pmu
;
41 struct intel_uncore_box
;
42 struct uncore_event_desc
;
43 struct freerunning_counters
;
45 struct intel_uncore_type
{
51 int num_freerunning_types
;
55 unsigned event_mask_ext
;
60 unsigned num_shared_regs
:8;
61 unsigned single_fixed
:1;
62 unsigned pair_ctr_ctl
:1;
63 unsigned *msr_offsets
;
64 struct event_constraint unconstrainted
;
65 struct event_constraint
*constraints
;
66 struct intel_uncore_pmu
*pmus
;
67 struct intel_uncore_ops
*ops
;
68 struct uncore_event_desc
*event_descs
;
69 struct freerunning_counters
*freerunning
;
70 const struct attribute_group
*attr_groups
[4];
71 struct pmu
*pmu
; /* for custom pmu ops */
74 #define pmu_group attr_groups[0]
75 #define format_group attr_groups[1]
76 #define events_group attr_groups[2]
78 struct intel_uncore_ops
{
79 void (*init_box
)(struct intel_uncore_box
*);
80 void (*exit_box
)(struct intel_uncore_box
*);
81 void (*disable_box
)(struct intel_uncore_box
*);
82 void (*enable_box
)(struct intel_uncore_box
*);
83 void (*disable_event
)(struct intel_uncore_box
*, struct perf_event
*);
84 void (*enable_event
)(struct intel_uncore_box
*, struct perf_event
*);
85 u64 (*read_counter
)(struct intel_uncore_box
*, struct perf_event
*);
86 int (*hw_config
)(struct intel_uncore_box
*, struct perf_event
*);
87 struct event_constraint
*(*get_constraint
)(struct intel_uncore_box
*,
89 void (*put_constraint
)(struct intel_uncore_box
*, struct perf_event
*);
92 struct intel_uncore_pmu
{
94 char name
[UNCORE_PMU_NAME_LEN
];
99 struct intel_uncore_type
*type
;
100 struct intel_uncore_box
**boxes
;
103 struct intel_uncore_extra_reg
{
105 u64 config
, config1
, config2
;
109 struct intel_uncore_box
{
111 int pkgid
; /* Logical package ID */
112 int n_active
; /* number of active events */
114 int cpu
; /* cpu to collect events */
117 struct perf_event
*events
[UNCORE_PMC_IDX_MAX
];
118 struct perf_event
*event_list
[UNCORE_PMC_IDX_MAX
];
119 struct event_constraint
*event_constraint
[UNCORE_PMC_IDX_MAX
];
120 unsigned long active_mask
[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX
)];
121 u64 tags
[UNCORE_PMC_IDX_MAX
];
122 struct pci_dev
*pci_dev
;
123 struct intel_uncore_pmu
*pmu
;
124 u64 hrtimer_duration
; /* hrtimer timeout for this box */
125 struct hrtimer hrtimer
;
126 struct list_head list
;
127 struct list_head active_list
;
129 struct intel_uncore_extra_reg shared_regs
[0];
132 #define UNCORE_BOX_FLAG_INITIATED 0
133 #define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */
135 struct uncore_event_desc
{
136 struct kobj_attribute attr
;
140 struct freerunning_counters
{
141 unsigned int counter_base
;
142 unsigned int counter_offset
;
143 unsigned int box_offset
;
144 unsigned int num_counters
;
149 struct list_head list
;
151 int pbus_to_physid
[256];
154 struct pci2phy_map
*__find_pci2phy_map(int segment
);
156 ssize_t
uncore_event_show(struct kobject
*kobj
,
157 struct kobj_attribute
*attr
, char *buf
);
159 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
161 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
165 #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
166 static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
167 struct kobj_attribute *attr, \
170 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
171 return sprintf(page, _format "\n"); \
173 static struct kobj_attribute format_attr_##_var = \
174 __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
176 static inline bool uncore_pmc_fixed(int idx
)
178 return idx
== UNCORE_PMC_IDX_FIXED
;
181 static inline bool uncore_pmc_freerunning(int idx
)
183 return idx
== UNCORE_PMC_IDX_FREERUNNING
;
186 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box
*box
)
188 return box
->pmu
->type
->box_ctl
;
191 static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box
*box
)
193 return box
->pmu
->type
->fixed_ctl
;
196 static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box
*box
)
198 return box
->pmu
->type
->fixed_ctr
;
202 unsigned uncore_pci_event_ctl(struct intel_uncore_box
*box
, int idx
)
204 if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8
, &box
->flags
))
205 return idx
* 8 + box
->pmu
->type
->event_ctl
;
207 return idx
* 4 + box
->pmu
->type
->event_ctl
;
211 unsigned uncore_pci_perf_ctr(struct intel_uncore_box
*box
, int idx
)
213 return idx
* 8 + box
->pmu
->type
->perf_ctr
;
216 static inline unsigned uncore_msr_box_offset(struct intel_uncore_box
*box
)
218 struct intel_uncore_pmu
*pmu
= box
->pmu
;
219 return pmu
->type
->msr_offsets
?
220 pmu
->type
->msr_offsets
[pmu
->pmu_idx
] :
221 pmu
->type
->msr_offset
* pmu
->pmu_idx
;
224 static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box
*box
)
226 if (!box
->pmu
->type
->box_ctl
)
228 return box
->pmu
->type
->box_ctl
+ uncore_msr_box_offset(box
);
231 static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box
*box
)
233 if (!box
->pmu
->type
->fixed_ctl
)
235 return box
->pmu
->type
->fixed_ctl
+ uncore_msr_box_offset(box
);
238 static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box
*box
)
240 return box
->pmu
->type
->fixed_ctr
+ uncore_msr_box_offset(box
);
245 * In the uncore document, there is no event-code assigned to free running
246 * counters. Some events need to be defined to indicate the free running
247 * counters. The events are encoded as event-code + umask-code.
249 * The event-code for all free running counters is 0xff, which is the same as
250 * the fixed counters.
252 * The umask-code is used to distinguish a fixed counter and a free running
253 * counter, and different types of free running counters.
254 * - For fixed counters, the umask-code is 0x0X.
255 * X indicates the index of the fixed counter, which starts from 0.
256 * - For free running counters, the umask-code uses the rest of the space.
257 * It would bare the format of 0xXY.
258 * X stands for the type of free running counters, which starts from 1.
259 * Y stands for the index of free running counters of same type, which
262 * For example, there are three types of IIO free running counters on Skylake
263 * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
264 * The event-code for all the free running counters is 0xff.
265 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
266 * which umask-code starts from 0x10.
267 * So 'ioclk' is encoded as event=0xff,umask=0x10
268 * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is
269 * the second type, which umask-code starts from 0x20.
270 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22
272 static inline unsigned int uncore_freerunning_idx(u64 config
)
274 return ((config
>> 8) & 0xf);
277 #define UNCORE_FREERUNNING_UMASK_START 0x10
279 static inline unsigned int uncore_freerunning_type(u64 config
)
281 return ((((config
>> 8) - UNCORE_FREERUNNING_UMASK_START
) >> 4) & 0xf);
285 unsigned int uncore_freerunning_counter(struct intel_uncore_box
*box
,
286 struct perf_event
*event
)
288 unsigned int type
= uncore_freerunning_type(event
->attr
.config
);
289 unsigned int idx
= uncore_freerunning_idx(event
->attr
.config
);
290 struct intel_uncore_pmu
*pmu
= box
->pmu
;
292 return pmu
->type
->freerunning
[type
].counter_base
+
293 pmu
->type
->freerunning
[type
].counter_offset
* idx
+
294 pmu
->type
->freerunning
[type
].box_offset
* pmu
->pmu_idx
;
298 unsigned uncore_msr_event_ctl(struct intel_uncore_box
*box
, int idx
)
300 return box
->pmu
->type
->event_ctl
+
301 (box
->pmu
->type
->pair_ctr_ctl
? 2 * idx
: idx
) +
302 uncore_msr_box_offset(box
);
306 unsigned uncore_msr_perf_ctr(struct intel_uncore_box
*box
, int idx
)
308 return box
->pmu
->type
->perf_ctr
+
309 (box
->pmu
->type
->pair_ctr_ctl
? 2 * idx
: idx
) +
310 uncore_msr_box_offset(box
);
314 unsigned uncore_fixed_ctl(struct intel_uncore_box
*box
)
317 return uncore_pci_fixed_ctl(box
);
319 return uncore_msr_fixed_ctl(box
);
323 unsigned uncore_fixed_ctr(struct intel_uncore_box
*box
)
326 return uncore_pci_fixed_ctr(box
);
328 return uncore_msr_fixed_ctr(box
);
332 unsigned uncore_event_ctl(struct intel_uncore_box
*box
, int idx
)
335 return uncore_pci_event_ctl(box
, idx
);
337 return uncore_msr_event_ctl(box
, idx
);
341 unsigned uncore_perf_ctr(struct intel_uncore_box
*box
, int idx
)
344 return uncore_pci_perf_ctr(box
, idx
);
346 return uncore_msr_perf_ctr(box
, idx
);
349 static inline int uncore_perf_ctr_bits(struct intel_uncore_box
*box
)
351 return box
->pmu
->type
->perf_ctr_bits
;
354 static inline int uncore_fixed_ctr_bits(struct intel_uncore_box
*box
)
356 return box
->pmu
->type
->fixed_ctr_bits
;
360 unsigned int uncore_freerunning_bits(struct intel_uncore_box
*box
,
361 struct perf_event
*event
)
363 unsigned int type
= uncore_freerunning_type(event
->attr
.config
);
365 return box
->pmu
->type
->freerunning
[type
].bits
;
368 static inline int uncore_num_freerunning(struct intel_uncore_box
*box
,
369 struct perf_event
*event
)
371 unsigned int type
= uncore_freerunning_type(event
->attr
.config
);
373 return box
->pmu
->type
->freerunning
[type
].num_counters
;
376 static inline int uncore_num_freerunning_types(struct intel_uncore_box
*box
,
377 struct perf_event
*event
)
379 return box
->pmu
->type
->num_freerunning_types
;
382 static inline bool check_valid_freerunning_event(struct intel_uncore_box
*box
,
383 struct perf_event
*event
)
385 unsigned int type
= uncore_freerunning_type(event
->attr
.config
);
386 unsigned int idx
= uncore_freerunning_idx(event
->attr
.config
);
388 return (type
< uncore_num_freerunning_types(box
, event
)) &&
389 (idx
< uncore_num_freerunning(box
, event
));
392 static inline int uncore_num_counters(struct intel_uncore_box
*box
)
394 return box
->pmu
->type
->num_counters
;
397 static inline bool is_freerunning_event(struct perf_event
*event
)
399 u64 cfg
= event
->attr
.config
;
401 return ((cfg
& UNCORE_FIXED_EVENT
) == UNCORE_FIXED_EVENT
) &&
402 (((cfg
>> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START
);
405 static inline void uncore_disable_box(struct intel_uncore_box
*box
)
407 if (box
->pmu
->type
->ops
->disable_box
)
408 box
->pmu
->type
->ops
->disable_box(box
);
411 static inline void uncore_enable_box(struct intel_uncore_box
*box
)
413 if (box
->pmu
->type
->ops
->enable_box
)
414 box
->pmu
->type
->ops
->enable_box(box
);
417 static inline void uncore_disable_event(struct intel_uncore_box
*box
,
418 struct perf_event
*event
)
420 box
->pmu
->type
->ops
->disable_event(box
, event
);
423 static inline void uncore_enable_event(struct intel_uncore_box
*box
,
424 struct perf_event
*event
)
426 box
->pmu
->type
->ops
->enable_event(box
, event
);
429 static inline u64
uncore_read_counter(struct intel_uncore_box
*box
,
430 struct perf_event
*event
)
432 return box
->pmu
->type
->ops
->read_counter(box
, event
);
435 static inline void uncore_box_init(struct intel_uncore_box
*box
)
437 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED
, &box
->flags
)) {
438 if (box
->pmu
->type
->ops
->init_box
)
439 box
->pmu
->type
->ops
->init_box(box
);
443 static inline void uncore_box_exit(struct intel_uncore_box
*box
)
445 if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED
, &box
->flags
)) {
446 if (box
->pmu
->type
->ops
->exit_box
)
447 box
->pmu
->type
->ops
->exit_box(box
);
451 static inline bool uncore_box_is_fake(struct intel_uncore_box
*box
)
453 return (box
->pkgid
< 0);
456 static inline struct intel_uncore_pmu
*uncore_event_to_pmu(struct perf_event
*event
)
458 return container_of(event
->pmu
, struct intel_uncore_pmu
, pmu
);
461 static inline struct intel_uncore_box
*uncore_event_to_box(struct perf_event
*event
)
463 return event
->pmu_private
;
466 struct intel_uncore_box
*uncore_pmu_to_box(struct intel_uncore_pmu
*pmu
, int cpu
);
467 u64
uncore_msr_read_counter(struct intel_uncore_box
*box
, struct perf_event
*event
);
468 void uncore_pmu_start_hrtimer(struct intel_uncore_box
*box
);
469 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box
*box
);
470 void uncore_pmu_event_start(struct perf_event
*event
, int flags
);
471 void uncore_pmu_event_stop(struct perf_event
*event
, int flags
);
472 int uncore_pmu_event_add(struct perf_event
*event
, int flags
);
473 void uncore_pmu_event_del(struct perf_event
*event
, int flags
);
474 void uncore_pmu_event_read(struct perf_event
*event
);
475 void uncore_perf_event_update(struct intel_uncore_box
*box
, struct perf_event
*event
);
476 struct event_constraint
*
477 uncore_get_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
);
478 void uncore_put_constraint(struct intel_uncore_box
*box
, struct perf_event
*event
);
479 u64
uncore_shared_reg_config(struct intel_uncore_box
*box
, int idx
);
481 extern struct intel_uncore_type
**uncore_msr_uncores
;
482 extern struct intel_uncore_type
**uncore_pci_uncores
;
483 extern struct pci_driver
*uncore_pci_driver
;
484 extern raw_spinlock_t pci2phy_map_lock
;
485 extern struct list_head pci2phy_map_head
;
486 extern struct pci_extra_dev
*uncore_extra_pci_dev
;
487 extern struct event_constraint uncore_constraint_empty
;
490 int snb_uncore_pci_init(void);
491 int ivb_uncore_pci_init(void);
492 int hsw_uncore_pci_init(void);
493 int bdw_uncore_pci_init(void);
494 int skl_uncore_pci_init(void);
495 void snb_uncore_cpu_init(void);
496 void nhm_uncore_cpu_init(void);
497 void skl_uncore_cpu_init(void);
498 int snb_pci2phy_map_init(int devid
);
501 int snbep_uncore_pci_init(void);
502 void snbep_uncore_cpu_init(void);
503 int ivbep_uncore_pci_init(void);
504 void ivbep_uncore_cpu_init(void);
505 int hswep_uncore_pci_init(void);
506 void hswep_uncore_cpu_init(void);
507 int bdx_uncore_pci_init(void);
508 void bdx_uncore_cpu_init(void);
509 int knl_uncore_pci_init(void);
510 void knl_uncore_cpu_init(void);
511 int skx_uncore_pci_init(void);
512 void skx_uncore_cpu_init(void);
515 void nhmex_uncore_cpu_init(void);