// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */
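
/*
 * Example usage (illustrative): each IOMMU instance is registered below
 * as its own PMU named "amd_iommu_<idx>", so its counters can be read
 * with e.g.:
 *
 *	perf stat -e amd_iommu_0/mem_trans_total/ -a -- sleep 1
 *
 * The available events and config fields are defined by the sysfs
 * attribute tables in this file.
 */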

#define pr_fmt(fmt)	"perf/amd_iommu: " fmt

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

#include "../perf_event.h"
#include "iommu.h"

#define COUNTER_SHIFT	16
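
/*
 * The hardware counters are 48 bits wide (see perf_iommu_read());
 * COUNTER_SHIFT is the remaining 64 - 48 = 16 bits, used to position
 * counter values at the top of a u64 so deltas wrap correctly when the
 * hardware counter overflows.
 */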

/* iommu pmu conf masks */
#define GET_CSOURCE(x)	((x)->conf & 0xFFULL)
#define GET_DEVID(x)	(((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)	(((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)	(((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)	((x)->conf1 & 0xFFFFULL)
#define GET_DOMID_MASK(x)	(((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)	(((x)->conf1 >> 32) & 0xFFFFFULL)
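
/*
 * The GET_* decoders above mirror the user-visible bit layout that the
 * PMU_FORMAT_ATTR() entries below advertise: csource in config[7:0],
 * devid in config[23:8], domid in config[39:24], pasid in config[59:40],
 * with the corresponding match masks carried in config1.
 */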

#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,	"config:0-7");
PMU_FORMAT_ATTR(devid,		"config:8-23");
PMU_FORMAT_ATTR(domid,		"config:24-39");
PMU_FORMAT_ATTR(pasid,		"config:40-59");
PMU_FORMAT_ATTR(devid_mask,	"config1:0-15");
PMU_FORMAT_ATTR(domid_mask,	"config1:16-31");
PMU_FORMAT_ATTR(pasid_mask,	"config1:32-51");

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
/* .attrs is allocated and filled at boot by _init_events_attrs() */
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct kobj_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,		"csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,		"csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,		"csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,		"csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,		"csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,	"csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,	"csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,	"csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,	"csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,		"csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,		"csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,		"csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,		"csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,		"csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,		"csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,		"csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,		"csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,		"csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,			"csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,	"csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,	"csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,		"csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,			"csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,			"csource=0x18"),
	{ /* end: all zeroes */ },
};

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;
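
/*
 * The IOMMU counters are not per-CPU, so events are exposed through a
 * single CPU (CPU 0, set in amd_iommu_pc_init()); tools such as perf
 * read this "cpumask" attribute to decide where to open events.
 */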

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
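			/*
			 * Each bank is given a 4-bit slice of
			 * cntr_assign_mask: bank + (bank*3) + cntr is
			 * simply bank*4 + cntr, which assumes at most
			 * four counters per bank.
			 */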
			shift = bank + (bank*3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift)) {
				continue;
			} else {
				piommu->cntr_assign_mask |= BIT_ULL(shift);
				event->hw.iommu_bank = bank;
				event->hw.iommu_cntr = cntr;
				retval = 0;
				goto out;
			}
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				      u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	/* bank/cntr are zero-based indexes, so reject anything past the end */
	if ((bank >= max_banks) || (cntr >= max_cntrs))
		return -EINVAL;

	shift = bank + cntr + (bank*3);

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~BIT_ULL(shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, they do not support per-process mode,
	 * nor event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);
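
	/*
	 * Each match register takes the match value in its low 32 bits
	 * and the corresponding mask in the high 32 bits; BIT(31) is
	 * presumably the match-enable bit, set whenever a non-zero
	 * match/mask is programmed.
	 */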
	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	/* Writing a zero counter source stops the counter */
	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	/* If prev_count changed under us, the racing updater accounts the delta. */
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/*
	 * Handle 48-bit counter overflow:
	 * shifting both values up by COUNTER_SHIFT (64 - 48 = 16) aligns
	 * the hardware counter with the top of the u64, so the subtraction
	 * wraps correctly on overflow; shifting back down yields the delta.
	 */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
		container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}

static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};
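
/*
 * Template PMU, copied into each perf_amd_iommu instance in
 * init_one_iommu(). task_ctx_nr = perf_invalid_context marks this as a
 * system-wide PMU with no per-task contexts, matching the restrictions
 * enforced in perf_iommu_event_init().
 */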
static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %d.\n", idx);
		kfree(perf_iommu);
	}

	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to an IOMMU, and can function independently.
	 * So we go through all IOMMUs and ignore any that fail init,
	 * unless all IOMMUs fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);

	return 0;
}

device_initcall(amd_iommu_pc_init);