/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014 ARM Limited
 */

#include <linux/ctype.h>
#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CCN_NUM_XP_PORTS 2
#define CCN_NUM_VCS 4
#define CCN_NUM_REGIONS 256
#define CCN_REGION_SIZE 0x10000

#define CCN_ALL_OLY_ID 0xff00
#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f

#define CCN_MN_ERRINT_STATUS 0x0008
#define CCN_MN_ERRINT_STATUS__INTREQ__DESERT 0x11
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)

#define CCN_DT_ACTIVE_DSM 0x0000
#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
#define CCN_DT_CTL 0x0028
#define CCN_DT_CTL__DT_EN (1 << 0)
#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
#define CCN_DT_PMCCNTR 0x0140
#define CCN_DT_PMCCNTRSR 0x0190
#define CCN_DT_PMOVSR 0x0198
#define CCN_DT_PMOVSR_CLR 0x01a0
#define CCN_DT_PMOVSR_CLR__MASK 0x1f
#define CCN_DT_PMCR 0x01a8
#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
#define CCN_DT_PMCR__PMU_EN (1 << 0)
#define CCN_DT_PMSR 0x01b0
#define CCN_DT_PMSR_REQ 0x01b8
#define CCN_DT_PMSR_CLR 0x01c0

#define CCN_HNF_PMU_EVENT_SEL 0x0600
#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_XP_DT_CONFIG 0x0300
#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
#define CCN_XP_DT_INTERFACE_SEL 0x0308
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
#define CCN_XP_DT_CONTROL 0x0370
#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
#define CCN_XP_PMU_EVENT_SEL 0x0600
#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f

#define CCN_SBAS_PMU_EVENT_SEL 0x0600
#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_RNI_PMU_EVENT_SEL 0x0600
#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf

#define CCN_TYPE_MN 0x01
#define CCN_TYPE_DT 0x02
#define CCN_TYPE_HNF 0x04
#define CCN_TYPE_HNI 0x05
#define CCN_TYPE_XP 0x08
#define CCN_TYPE_SBSX 0x0c
#define CCN_TYPE_SBAS 0x10
#define CCN_TYPE_RNI_1P 0x14
#define CCN_TYPE_RNI_2P 0x15
#define CCN_TYPE_RNI_3P 0x16
#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
#define CCN_TYPE_RND_2P 0x19
#define CCN_TYPE_RND_3P 0x1a
#define CCN_TYPE_CYCLES 0xff /* Pseudotype */

#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */

#define CCN_NUM_PMU_EVENTS 4
#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS

#define CCN_NUM_PREDEFINED_MASKS 4
#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)

struct arm_ccn_component {
	void __iomem *base;
	u32 type;

	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
	union {
		struct {
			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
		} xp;
	};
};

#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
		struct arm_ccn_dt, pmu), struct arm_ccn, dt)

struct arm_ccn_dt {
	int id;
	void __iomem *base;

	spinlock_t config_lock;

	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
	struct {
		struct arm_ccn_component *source;
		struct perf_event *event;
	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];

	struct {
		u64 l, h;
	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];

	struct hrtimer hrtimer;

	cpumask_t cpu;
	struct notifier_block cpu_nb;

	struct pmu pmu;
};

struct arm_ccn {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;

	unsigned sbas_present:1;
	unsigned sbsx_present:1;

	int num_nodes;
	struct arm_ccn_component *node;

	int num_xps;
	struct arm_ccn_component *xp;

	struct arm_ccn_dt dt;
};

static int arm_ccn_node_to_xp(int node)
{
	return node / CCN_NUM_XP_PORTS;
}

static int arm_ccn_node_to_xp_port(int node)
{
	return node % CCN_NUM_XP_PORTS;
}

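/*
 * For illustration (derived from the two helpers above): with
 * CCN_NUM_XP_PORTS == 2, node 5 hangs off XP 5 / 2 == 2 on port
 * 5 % 2 == 1, while node 4 shares that XP on port 0.
 */
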
/*
 * Bit shifts and masks in these defines must be kept in sync with
 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 */
#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)

static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
{
	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
	*config |= (node_xp << 0) | (type << 8) | (port << 24);
}

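/*
 * A worked example of the layout above (illustrative values only):
 * node=0x04, type=CCN_TYPE_HNF (0x04), event=0x01, port=1 encodes as
 * (0x04 << 0) | (0x04 << 8) | (0x01 << 16) | (0x1 << 24) = 0x01010404,
 * which the CCN_CONFIG_*() accessors decode back field by field.
 */
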
static ssize_t arm_ccn_pmu_format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea = container_of(attr,
			struct dev_ext_attribute, attr);

	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
}

#define CCN_FORMAT_ATTR(_name, _config) \
	struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
			{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
			NULL), _config }

static CCN_FORMAT_ATTR(node, "config:0-7");
static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");

static struct attribute *arm_ccn_pmu_format_attrs[] = {
	&arm_ccn_pmu_format_attr_node.attr.attr,
	&arm_ccn_pmu_format_attr_xp.attr.attr,
	&arm_ccn_pmu_format_attr_type.attr.attr,
	&arm_ccn_pmu_format_attr_event.attr.attr,
	&arm_ccn_pmu_format_attr_port.attr.attr,
	&arm_ccn_pmu_format_attr_vc.attr.attr,
	&arm_ccn_pmu_format_attr_dir.attr.attr,
	&arm_ccn_pmu_format_attr_mask.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_format_attr_group = {
	.name = "format",
	.attrs = arm_ccn_pmu_format_attrs,
};

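/*
 * Usage sketch (hypothetical command line, assuming the PMU registered
 * itself as "ccn"): the perf tool reads the format attributes above and
 * assembles the config word from keywords, e.g.
 *
 *   perf stat -a -e ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=0/ sleep 1
 *
 * placing xp in config:0-7, port in config:24-25, vc in config:26-28
 * and dir in config:29.
 */
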
struct arm_ccn_pmu_event {
	struct device_attribute attr;
	u32 type;
	u32 event;
	int num_ports;
	int num_vcs;
	const char *def;
	int mask;
};

#define CCN_EVENT_ATTR(_name) \
	__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)

/*
 * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
 * ports of the XPs they are connected to. For the sake of usability they are
 * explicitly defined here (and translated into a relevant watchpoint in
 * arm_ccn_pmu_event_init()) so the user can easily request them without deep
 * knowledge of the flit format.
 */

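/*
 * For instance (an illustrative reading of the code below): a request for
 * mn_dvmop on node 1 is rewritten by arm_ccn_pmu_event_init() into a
 * watchpoint on XP 0, port 1 (per arm_ccn_node_to_xp() and
 * arm_ccn_node_to_xp_port()), armed with the event's "dir=0,vc=0,
 * cmp_h=0x2800" definition and the CCN_IDX_MASK_OPCODE comparison mask.
 */
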
#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
		.def = _def, .mask = _mask, }

#define CCN_EVENT_HNI(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_SBSX(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
		.type = CCN_TYPE_HNF, .event = _event, }

#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
		.type = CCN_TYPE_XP, .event = _event, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }

/*
 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
 * on configuration. One of them is picked to represent the whole group,
 * as they all share the same event types.
 */
#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
		.type = CCN_TYPE_RNI_3P, .event = _event, }

#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
		.type = CCN_TYPE_SBAS, .event = _event, }

#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
		.type = CCN_TYPE_CYCLES, }

static ssize_t arm_ccn_pmu_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn_pmu_event *event = container_of(attr,
			struct arm_ccn_pmu_event, attr);
	ssize_t res;

	res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
	if (event->event)
		res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
				event->event);
	if (event->def)
		res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
				event->def);
	if (event->mask)
		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
				event->mask);

	/* Arguments required by an event */
	switch (event->type) {
	case CCN_TYPE_CYCLES:
		break;
	case CCN_TYPE_XP:
		res += snprintf(buf + res, PAGE_SIZE - res,
				",xp=?,port=?,vc=?,dir=?");
		if (event->event == CCN_EVENT_WATCHPOINT)
			res += snprintf(buf + res, PAGE_SIZE - res,
					",cmp_l=?,cmp_h=?,mask=?");
		break;
	default:
		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
		break;
	}

	res += snprintf(buf + res, PAGE_SIZE - res, "\n");

	return res;
}

static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
		struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	struct device_attribute *dev_attr = container_of(attr,
			struct device_attribute, attr);
	struct arm_ccn_pmu_event *event = container_of(dev_attr,
			struct arm_ccn_pmu_event, attr);

	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
		return 0;
	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
		return 0;

	return attr->mode;
}

static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
	CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_HNF(cache_miss, 0x1),
	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
	CCN_EVENT_HNF(cache_fill, 0x3),
	CCN_EVENT_HNF(pocq_retry, 0x4),
	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
	CCN_EVENT_HNF(sf_hit, 0x6),
	CCN_EVENT_HNF(sf_evictions, 0x7),
	CCN_EVENT_HNF(snoops_sent, 0x8),
	CCN_EVENT_HNF(snoops_broadcast, 0x9),
	CCN_EVENT_HNF(l3_eviction, 0xa),
	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
	CCN_EVENT_HNF(mc_retries, 0xc),
	CCN_EVENT_HNF(mc_reqs, 0xd),
	CCN_EVENT_HNF(qos_hh_retry, 0xe),
	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
	CCN_EVENT_RNI(rxdat_flits, 0x4),
	CCN_EVENT_RNI(txdat_flits, 0x5),
	CCN_EVENT_RNI(txreq_flits, 0x6),
	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
	CCN_EVENT_RNI(rrt_full, 0x8),
	CCN_EVENT_RNI(wrt_full, 0x9),
	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
	CCN_EVENT_XP(upload_starvation, 0x1),
	CCN_EVENT_XP(download_starvation, 0x2),
	CCN_EVENT_XP(respin, 0x3),
	CCN_EVENT_XP(valid_flit, 0x4),
	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
	CCN_EVENT_SBAS(rxdat_flits, 0x4),
	CCN_EVENT_SBAS(txdat_flits, 0x5),
	CCN_EVENT_SBAS(txreq_flits, 0x6),
	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
	CCN_EVENT_SBAS(rrt_full, 0x8),
	CCN_EVENT_SBAS(wrt_full, 0x9),
	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
	CCN_EVENT_CYCLES(cycles),
};

/* Populated in arm_ccn_init() */
static struct attribute
		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];

static struct attribute_group arm_ccn_pmu_events_attr_group = {
	.name = "events",
	.is_visible = arm_ccn_pmu_events_is_visible,
	.attrs = arm_ccn_pmu_events_attrs,
};

static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
{
	int i;

	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
		return NULL;

	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';

	switch (name[1]) {
	case 'l':
		return &ccn->dt.cmp_mask[i].l;
	case 'h':
		return &ccn->dt.cmp_mask[i].h;
	default:
		return NULL;
	}
}

static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);

	return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
}

static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
	int err = -EINVAL;

	if (mask)
		err = kstrtoull(buf, 0, mask);

	return err ? err : count;
}

#define CCN_CMP_MASK_ATTR(_name) \
	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
			__ATTR(_name, S_IRUGO | S_IWUSR, \
			arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)

#define CCN_CMP_MASK_ATTR_RO(_name) \
	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
			__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)

static CCN_CMP_MASK_ATTR(0l);
static CCN_CMP_MASK_ATTR(0h);
static CCN_CMP_MASK_ATTR(1l);
static CCN_CMP_MASK_ATTR(1h);
static CCN_CMP_MASK_ATTR(2l);
static CCN_CMP_MASK_ATTR(2h);
static CCN_CMP_MASK_ATTR(3l);
static CCN_CMP_MASK_ATTR(3h);
static CCN_CMP_MASK_ATTR(4l);
static CCN_CMP_MASK_ATTR(4h);
static CCN_CMP_MASK_ATTR(5l);
static CCN_CMP_MASK_ATTR(5h);
static CCN_CMP_MASK_ATTR(6l);
static CCN_CMP_MASK_ATTR(6h);
static CCN_CMP_MASK_ATTR(7l);
static CCN_CMP_MASK_ATTR(7h);
static CCN_CMP_MASK_ATTR_RO(8l);
static CCN_CMP_MASK_ATTR_RO(8h);
static CCN_CMP_MASK_ATTR_RO(9l);
static CCN_CMP_MASK_ATTR_RO(9h);
static CCN_CMP_MASK_ATTR_RO(al);
static CCN_CMP_MASK_ATTR_RO(ah);
static CCN_CMP_MASK_ATTR_RO(bl);
static CCN_CMP_MASK_ATTR_RO(bh);

static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
	.name = "cmp_mask",
	.attrs = arm_ccn_pmu_cmp_mask_attrs,
};

static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu);
}

static struct device_attribute arm_ccn_pmu_cpumask_attr =
		__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);

static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
	&arm_ccn_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
	.attrs = arm_ccn_pmu_cpumask_attrs,
};

/*
 * Default poll period is 10ms, which is way over the top anyway,
 * as in the worst case scenario (an event every cycle), with 1GHz
 * clocked bus, the smallest, 32 bit counter will overflow in
 * more than 4 seconds.
 */
static unsigned int arm_ccn_pmu_poll_period_us = 10000;
module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

static ktime_t arm_ccn_pmu_timer_period(void)
{
	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
}

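/*
 * Sanity check of the figure above: a 32 bit counter incremented every
 * cycle of a 1GHz clock wraps after 2^32 / 10^9 ~= 4.29 seconds, so the
 * default 10000us poll period leaves more than two orders of magnitude
 * of margin.
 */
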
static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
	&arm_ccn_pmu_events_attr_group,
	&arm_ccn_pmu_format_attr_group,
	&arm_ccn_pmu_cmp_mask_attr_group,
	&arm_ccn_pmu_cpumask_attr_group,
	NULL
};

static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
{
	int bit;

	do {
		bit = find_first_zero_bit(bitmap, size);
		if (bit >= size)
			return -EAGAIN;
	} while (test_and_set_bit(bit, bitmap));

	return bit;
}

/* All RN-I and RN-D nodes have identical PMUs */
static int arm_ccn_pmu_type_eq(u32 a, u32 b)
{
	if (a == b)
		return 1;

	switch (a) {
	case CCN_TYPE_RNI_1P:
	case CCN_TYPE_RNI_2P:
	case CCN_TYPE_RNI_3P:
	case CCN_TYPE_RND_1P:
	case CCN_TYPE_RND_2P:
	case CCN_TYPE_RND_3P:
		switch (b) {
		case CCN_TYPE_RNI_1P:
		case CCN_TYPE_RNI_2P:
		case CCN_TYPE_RNI_3P:
		case CCN_TYPE_RND_1P:
		case CCN_TYPE_RND_2P:
		case CCN_TYPE_RND_3P:
			return 1;
		}
		break;
	}

	return 0;
}

static int arm_ccn_pmu_event_alloc(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	struct arm_ccn_component *source;
	int bit;

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Allocate the cycle counter */
	if (type == CCN_TYPE_CYCLES) {
		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
				ccn->dt.pmu_counters_mask))
			return -EAGAIN;

		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

		return 0;
	}

	/* Allocate an event counter */
	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
			CCN_NUM_PMU_EVENT_COUNTERS);
	if (hw->idx < 0) {
		dev_dbg(ccn->dev, "No more counters available!\n");
		return -EAGAIN;
	}

	if (type == CCN_TYPE_XP)
		source = &ccn->xp[node_xp];
	else
		source = &ccn->node[node_xp];
	ccn->dt.pmu_counters[hw->idx].source = source;

	/* Allocate an event source or a watchpoint */
	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
		bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
				CCN_NUM_XP_WATCHPOINTS);
	else
		bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
				CCN_NUM_PMU_EVENTS);
	if (bit < 0) {
		dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
				node_xp);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
		return -EAGAIN;
	}

	hw->config_base = bit;

	ccn->dt.pmu_counters[hw->idx].event = event;

	return 0;
}

static void arm_ccn_pmu_event_release(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
	} else {
		struct arm_ccn_component *source =
				ccn->dt.pmu_counters[hw->idx].source;

		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
				CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
		else
			clear_bit(hw->config_base, source->pmu_events_mask);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
	}

	ccn->dt.pmu_counters[hw->idx].source = NULL;
	ccn->dt.pmu_counters[hw->idx].event = NULL;
}

static int arm_ccn_pmu_event_init(struct perf_event *event)
{
	struct arm_ccn *ccn;
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	int valid;
	int i;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	ccn = pmu_to_arm_ccn(event->pmu);

	if (hw->sample_period) {
		dev_warn(ccn->dev, "Sampling not supported!\n");
		return -EOPNOTSUPP;
	}

	if (has_branch_stack(event) || event->attr.exclude_user ||
			event->attr.exclude_kernel || event->attr.exclude_hv ||
			event->attr.exclude_idle) {
		dev_warn(ccn->dev, "Can't exclude execution levels!\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_warn(ccn->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}
	/*
	 * Many perf core operations (eg. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, like CCN, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&ccn->dt.cpu);

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Validate node/xp vs topology */
	switch (type) {
	case CCN_TYPE_XP:
		if (node_xp >= ccn->num_xps) {
			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_CYCLES:
		break;
	default:
		if (node_xp >= ccn->num_nodes) {
			dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
			return -EINVAL;
		}
		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
			dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
					type, node_xp);
			return -EINVAL;
		}
		break;
	}

	/* Validate event ID vs available for the type */
	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
			i++) {
		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
		u32 port = CCN_CONFIG_PORT(event->attr.config);
		u32 vc = CCN_CONFIG_VC(event->attr.config);

		if (!arm_ccn_pmu_type_eq(type, e->type))
			continue;
		if (event_id != e->event)
			continue;
		if (e->num_ports && port >= e->num_ports) {
			dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
					port, node_xp);
			return -EINVAL;
		}
		if (e->num_vcs && vc >= e->num_vcs) {
			dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
					vc, node_xp);
			return -EINVAL;
		}
		valid = 1;
	}
	if (!valid) {
		dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
				event_id, node_xp);
		return -EINVAL;
	}

	/* Watchpoint-based event for a node is actually set on XP */
	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
		u32 port;

		type = CCN_TYPE_XP;
		port = arm_ccn_node_to_xp_port(node_xp);
		node_xp = arm_ccn_node_to_xp(node_xp);

		arm_ccn_pmu_config_set(&event->attr.config,
				node_xp, type, port);
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	list_for_each_entry(sibling, &event->group_leader->sibling_list,
			group_entry)
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;

	return 0;
}

static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
	u64 res;

	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
#ifdef readq
		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
#else
		/* 40 bit counter, can do snapshot and read in two parts */
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
			;
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
		res <<= 32;
		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
#endif
	} else {
		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
	}

	return res;
}

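/*
 * Reassembly example for the non-readq path above (illustrative value):
 * for a snapshotted cycle count of 0x123456789a, the high readl() returns
 * 0x12 (masked to 8 bits), which is shifted up by 32 and OR-ed with the
 * low word 0x3456789a, reconstructing the full 40 bit value.
 */
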
static void arm_ccn_pmu_event_update(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 prev_count, new_count, mask;

	do {
		prev_count = local64_read(&hw->prev_count);
		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);

	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;

	local64_add((new_count - prev_count) & mask, &event->count);
}

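/*
 * The masked subtraction above makes wraparound harmless, e.g.
 * (illustrative) for a 32 bit counter: prev_count = 0xfffffff0 and
 * new_count = 0x10 give (0x10 - 0xfffffff0) & 0xffffffff = 0x20,
 * i.e. exactly the 32 events that occurred between the two reads.
 */
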
static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *xp;
	u32 val, dt_cfg;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
	else
		xp = &ccn->xp[arm_ccn_node_to_xp(
				CCN_CONFIG_NODE(event->attr.config))];

	if (enable)
		dt_cfg = hw->event_base;
	else
		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;

	spin_lock(&ccn->dt.config_lock);

	val = readl(xp->base + CCN_XP_DT_CONFIG);
	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
	writel(val, xp->base + CCN_XP_DT_CONFIG);

	spin_unlock(&ccn->dt.config_lock);
}

static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	local64_set(&event->hw.prev_count,
			arm_ccn_pmu_read_counter(ccn, hw->idx));
	hw->state = 0;

	/*
	 * Pin the timer, so that the overflows are handled by the chosen
	 * event->cpu (this is the same one as presented in "cpumask"
	 * attribute).
	 */
	if (!ccn->irq)
		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
				HRTIMER_MODE_REL_PINNED);

	/* Set the DT bus input, engaging the counter */
	arm_ccn_pmu_xp_dt_config(event, 1);
}

static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 timeout;

	/* Disable counting, setting the DT bus to pass-through mode */
	arm_ccn_pmu_xp_dt_config(event, 0);

	if (!ccn->irq)
		hrtimer_cancel(&ccn->dt.hrtimer);

	/* Let the DT bus drain */
	timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
			ccn->num_xps;
	while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
			timeout)
		cpu_relax();

	if (flags & PERF_EF_UPDATE)
		arm_ccn_pmu_event_update(event);

	hw->state |= PERF_HES_STOPPED;
}

static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	unsigned long wp = hw->config_base;
	u32 val;
	u64 cmp_l = event->attr.config1;
	u64 cmp_h = event->attr.config2;
	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);

	/* Direction (RX/TX), device (port) & virtual channel */
	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
	val |= CCN_CONFIG_DIR(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
	val |= CCN_CONFIG_PORT(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
	val |= CCN_CONFIG_VC(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);

	/* Comparison values */
	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
	writel((cmp_l >> 32) & 0x7fffffff,
			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
	writel((cmp_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);

	/* Mask */
	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
	writel((mask_l >> 32) & 0x7fffffff,
			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
	writel((mask_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
}

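/*
 * Comparison mask semantics, as implied by the predefined masks set up in
 * arm_ccn_pmu_init(): a set mask bit excludes that flit bit from the
 * comparison, so CCN_IDX_MASK_ANY (all ones) matches every flit, while
 * CCN_IDX_MASK_EXACT (all zeroes) requires cmp_l/cmp_h to match exactly.
 */
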
static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 val, id;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);

	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
			(CCN_CONFIG_PORT(event->attr.config) << 3) |
			(CCN_CONFIG_EVENT(event->attr.config) << 0);

	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
}

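/*
 * Example of the id composition above (illustrative values): vc=2, port=1,
 * event=0x3 gives id = (2 << 4) | (1 << 3) | 0x3 = 0x2b, which fits in the
 * 6 bit field selected by CCN_XP_PMU_EVENT_SEL__ID__MASK (0x3f).
 */
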
static void arm_ccn_pmu_node_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 type = CCN_CONFIG_TYPE(event->attr.config);
	u32 port, val;

	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
			hw->config_base);

	/* These *_event_sel regs should be identical, but let's make sure... */
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
		return;

	/* Set the event id for the pre-allocated counter */
	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= CCN_CONFIG_EVENT(event->attr.config) <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 xp, offset, val;

	/* Cycle counter requires no setup */
	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
		return;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = CCN_CONFIG_XP(event->attr.config);
	else
		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));

	spin_lock(&ccn->dt.config_lock);

	/* Set the DT bus "distance" register */
	offset = (hw->idx / 4) * 4;
	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
		if (CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			arm_ccn_pmu_xp_watchpoint_config(event);
		else
			arm_ccn_pmu_xp_event_config(event);
	} else {
		arm_ccn_pmu_node_event_config(event);
	}

	spin_unlock(&ccn->dt.config_lock);
}

static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
	int err;
	struct hw_perf_event *hw = &event->hw;

	err = arm_ccn_pmu_event_alloc(event);
	if (err)
		return err;

	arm_ccn_pmu_event_config(event);

	hw->state = PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);

	return 0;
}

static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);

	arm_ccn_pmu_event_release(event);
}

static void arm_ccn_pmu_event_read(struct perf_event *event)
{
	arm_ccn_pmu_event_update(event);
}

static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
	int idx;

	if (!pmovsr)
		return IRQ_NONE;

	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);

	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);

	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
		struct perf_event *event = dt->pmu_counters[idx].event;
		int overflowed = pmovsr & BIT(idx);

		WARN_ON_ONCE(overflowed && !event &&
				idx != CCN_IDX_PMU_CYCLE_COUNTER);

		if (!event || !overflowed)
			continue;

		arm_ccn_pmu_event_update(event);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
			hrtimer);
	unsigned long flags;

	local_irq_save(flags);
	arm_ccn_pmu_overflow_handler(dt);
	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
	return HRTIMER_RESTART;
}

static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
	unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			break;
		perf_pmu_migrate_context(&dt->pmu, cpu, target);
		cpumask_set_cpu(target, &dt->cpu);
		if (ccn->irq)
			WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
		break;
	}

	return NOTIFY_OK;
}

static DEFINE_IDA(arm_ccn_pmu_ida);

static int arm_ccn_pmu_init(struct arm_ccn *ccn)
{
	int i;
	char *name;
	int err;

	/* Initialize DT subsystem */
	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
	spin_lock_init(&ccn->dt.config_lock);
	writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
			ccn->dt.base + CCN_DT_PMCR);
	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
	for (i = 0; i < ccn->num_xps; i++) {
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
				CCN_XP_DT_CONTROL__DT_ENABLE,
				ccn->xp[i].base + CCN_XP_DT_CONTROL);
	}
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);

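	/*
	 * An illustrative reading of the two masks above: CCN_IDX_MASK_ORDER
	 * clears mask bit 15, so only flit bit 15 (the one matched by the
	 * *reqflits_order events' cmp_h=0x8000) is compared, while
	 * CCN_IDX_MASK_OPCODE clears bits 9-13, isolating the 5 bit opcode
	 * field matched by cmp_h values such as 0x1c00 (eobarrier) or
	 * 0x2800 (dvmop).
	 */
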
	/* Get a convenient /sys/event_source/devices/ name */
	ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
	if (ccn->dt.id == 0) {
		name = "ccn";
	} else {
		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);

		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
	}

	/* Perf driver registration */
	ccn->dt.pmu = (struct pmu) {
		.attr_groups = arm_ccn_pmu_attr_groups,
		.task_ctx_nr = perf_invalid_context,
		.event_init = arm_ccn_pmu_event_init,
		.add = arm_ccn_pmu_event_add,
		.del = arm_ccn_pmu_event_del,
		.start = arm_ccn_pmu_event_start,
		.stop = arm_ccn_pmu_event_stop,
		.read = arm_ccn_pmu_event_read,
	};

	/* No overflow interrupt? Have to use a timer instead. */
	if (!ccn->irq) {
		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
	}

	/* Pick one CPU which we will use to collect data from CCN... */
	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);

	/*
	 * ... and change the selection when it goes offline. Priority is
	 * picked to have a chance to migrate events before perf is notified.
	 */
	ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
	ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1;
	err = register_cpu_notifier(&ccn->dt.cpu_nb);
	if (err)
		goto error_cpu_notifier;

	/* Also make sure that the overflow interrupt is handled by this CPU */
	if (ccn->irq) {
		err = irq_set_affinity(ccn->irq, &ccn->dt.cpu);
		if (err) {
			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
			goto error_set_affinity;
		}
	}

	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
	if (err)
		goto error_pmu_register;

	return 0;

error_pmu_register:
error_set_affinity:
	unregister_cpu_notifier(&ccn->dt.cpu_nb);
error_cpu_notifier:
	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
	for (i = 0; i < ccn->num_xps; i++)
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
	writel(0, ccn->dt.base + CCN_DT_PMCR);
	return err;
}

static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
	int i;

	irq_set_affinity(ccn->irq, cpu_possible_mask);
	unregister_cpu_notifier(&ccn->dt.cpu_nb);
	for (i = 0; i < ccn->num_xps; i++)
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
	writel(0, ccn->dt.base + CCN_DT_PMCR);
	perf_pmu_unregister(&ccn->dt.pmu);
	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}

static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
		int (*callback)(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id))
{
	int region;

	for (region = 0; region < CCN_NUM_REGIONS; region++) {
		u32 val, type, id;
		void __iomem *base;
		int err;

		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
				4 * (region / 32));
		if (!(val & (1 << (region % 32))))
			continue;

		base = ccn->base + region * CCN_REGION_SIZE;
		val = readl(base + CCN_ALL_OLY_ID);
		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
				CCN_ALL_OLY_ID__OLY_ID__MASK;
		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
				CCN_ALL_OLY_ID__NODE_ID__MASK;

		err = callback(ccn, region, base, type, id);
		if (err)
			return err;
	}

	return 0;
}

static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{
	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
		ccn->num_xps = id + 1;
	else if (id >= ccn->num_nodes)
		ccn->num_nodes = id + 1;

	return 0;
}

static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{
	struct arm_ccn_component *component;

	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);

	switch (type) {
	case CCN_TYPE_MN:
	case CCN_TYPE_DT:
		return 0;
	case CCN_TYPE_XP:
		component = &ccn->xp[id];
		break;
	case CCN_TYPE_SBSX:
		ccn->sbsx_present = 1;
		component = &ccn->node[id];
		break;
	case CCN_TYPE_SBAS:
		ccn->sbas_present = 1;
		/* Fall-through */
	default:
		component = &ccn->node[id];
		break;
	}

	component->base = base;
	component->type = type;

	return 0;
}

static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
		const u32 *err_sig_val)
{
	/* This should be really handled by firmware... */
	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
			err_sig_val[5], err_sig_val[4], err_sig_val[3],
			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);

	return IRQ_HANDLED;
}

static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
{
	irqreturn_t res = IRQ_NONE;
	struct arm_ccn *ccn = dev_id;
	u32 err_sig_val[6];
	u32 err_or;
	int i;

	/* PMU overflow is a special case */
	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
	}

	/* Have to read all err_sig_vals to clear them */
	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
		err_sig_val[i] = readl(ccn->base +
				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
		err_or |= err_sig_val[i];
	}
	if (err_or)
		res |= arm_ccn_error_handler(ccn, err_sig_val);

	if (res != IRQ_NONE)
		writel(CCN_MN_ERRINT_STATUS__INTREQ__DESERT,
				ccn->base + CCN_MN_ERRINT_STATUS);

	return res;
}

static int arm_ccn_probe(struct platform_device *pdev)
{
	struct arm_ccn *ccn;
	struct resource *res;
	unsigned int irq;
	int err;

	ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
	if (!ccn)
		return -ENOMEM;
	ccn->dev = &pdev->dev;
	platform_set_drvdata(pdev, ccn);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(ccn->dev, res->start,
			resource_size(res), pdev->name))
		return -EBUSY;

	ccn->base = devm_ioremap(ccn->dev, res->start,
			resource_size(res));
	if (!ccn->base)
		return -EFAULT;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res)
		return -EINVAL;
	irq = res->start;

	/* Check if we can use the interrupt */
	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);
	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
		/* Can set 'disable' bits, so can acknowledge interrupts */
		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
				ccn->base + CCN_MN_ERRINT_STATUS);
		err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
				dev_name(ccn->dev), ccn);
		if (err)
			return err;

		ccn->irq = irq;
	}

	/* Build topology */

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
	if (err)
		return err;

	ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
		GFP_KERNEL);
	ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
		GFP_KERNEL);
	if (!ccn->node || !ccn->xp)
		return -ENOMEM;

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
	if (err)
		return err;

	return arm_ccn_pmu_init(ccn);
}

static int arm_ccn_remove(struct platform_device *pdev)
{
	struct arm_ccn *ccn = platform_get_drvdata(pdev);

	arm_ccn_pmu_cleanup(ccn);

	return 0;
}

[] = {
1520 { .compatible
= "arm,ccn-504", },
1524 static struct platform_driver arm_ccn_driver
= {
1527 .of_match_table
= arm_ccn_match
,
1529 .probe
= arm_ccn_probe
,
1530 .remove
= arm_ccn_remove
,
static int __init arm_ccn_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;

	return platform_driver_register(&arm_ccn_driver);
}

static void __exit arm_ccn_exit(void)
{
	platform_driver_unregister(&arm_ccn_driver);
}

module_init(arm_ccn_init);
module_exit(arm_ccn_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_LICENSE("GPL");